[turboshaft] Introduce new Turboshaft type system

Bug: v8:12783
Change-Id: Id5d3ce17f0dc8cec1b2b257585290bed72dd9fd9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4057111
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Owners-Override: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Darius Mercadier <dmercadier@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84765}
This commit is contained in:
Nico Hartmann 2022-12-09 18:00:05 +01:00 committed by V8 LUCI CQ
parent ccddea063b
commit b3ffda44f2
43 changed files with 3204 additions and 267 deletions

View File

@ -962,6 +962,7 @@ filegroup(
"src/objects/templates.tq",
"src/objects/torque-defined-classes.tq",
"src/objects/turbofan-types.tq",
"src/objects/turboshaft-types.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
] + select({
@ -1917,6 +1918,8 @@ filegroup(
"src/objects/transitions.h",
"src/objects/turbofan-types-inl.h",
"src/objects/turbofan-types.h",
"src/objects/turboshaft-types-inl.h",
"src/objects/turboshaft-types.h",
"src/objects/type-hints.cc",
"src/objects/type-hints.h",
"src/objects/value-serializer.cc",
@ -2886,6 +2889,7 @@ filegroup(
"src/compiler/store-store-elimination.h",
"src/compiler/turboshaft/assembler.cc",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.cc",
"src/compiler/turboshaft/decompression-optimization.h",
@ -2918,6 +2922,9 @@ filegroup(
"src/compiler/turboshaft/simplify-tf-loops.cc",
"src/compiler/turboshaft/simplify-tf-loops.h",
"src/compiler/turboshaft/snapshot-table.h",
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/types.cc",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/utils.cc",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",

View File

@ -1876,6 +1876,7 @@ torque_files = [
"src/objects/templates.tq",
"src/objects/torque-defined-classes.tq",
"src/objects/turbofan-types.tq",
"src/objects/turboshaft-types.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
@ -2951,6 +2952,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.h",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.h",
"src/compiler/turboshaft/deopt-data.h",
@ -2972,6 +2974,8 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/sidetable.h",
"src/compiler/turboshaft/simplify-tf-loops.h",
"src/compiler/turboshaft/snapshot-table.h",
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
"src/compiler/turboshaft/variable-reducer.h",
@ -3435,6 +3439,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/transitions.h",
"src/objects/turbofan-types-inl.h",
"src/objects/turbofan-types.h",
"src/objects/turboshaft-types-inl.h",
"src/objects/turboshaft-types.h",
"src/objects/type-hints.h",
"src/objects/value-serializer.h",
"src/objects/visitors-inl.h",
@ -4302,6 +4308,7 @@ v8_source_set("v8_turboshaft") {
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/representations.cc",
"src/compiler/turboshaft/simplify-tf-loops.cc",
"src/compiler/turboshaft/types.cc",
"src/compiler/turboshaft/utils.cc",
]
@ -5697,14 +5704,12 @@ if (v8_use_libm_trig_functions) {
"third_party/glibc/src/sysdeps/ieee754/dbl-64/dla.h",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/endian.h",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/mydefs.h",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/s_sin.c",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/sincostab.c",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/trig.h",
"third_party/glibc/src/sysdeps/ieee754/dbl-64/usncs.h",
]
configs += [
"//build/config/compiler:no_chromium_code",
]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs -= [ "//build/config/compiler:chromium_code" ]
if (!is_debug) {
# Build code using -O3, see: crbug.com/1084371.

View File

@ -62,7 +62,7 @@ class BitField final {
}
// Returns a type U with the bit field value updated.
static constexpr U update(U previous, T value) {
V8_NODISCARD static constexpr U update(U previous, T value) {
return (previous & ~kMask) | encode(value);
}

View File

@ -15,16 +15,16 @@ namespace v8::base {
// Returns true iff the {element} is found in the {container}.
template <typename C, typename T>
bool contains(const C& container, const T& element) {
const auto e = end(container);
return std::find(begin(container), e, element) != e;
const auto e = std::end(container);
return std::find(std::begin(container), e, element) != e;
}
// Returns the first index of {element} in {container}. Returns std::nullopt if
// {container} does not contain {element}.
template <typename C, typename T>
std::optional<size_t> index_of(const C& container, const T& element) {
const auto b = begin(container);
const auto e = end(container);
const auto b = std::begin(container);
const auto e = std::end(container);
if (auto it = std::find(b, e, element); it != e) {
return {std::distance(b, it)};
}
@ -35,8 +35,8 @@ std::optional<size_t> index_of(const C& container, const T& element) {
// {predicate}. Returns std::nullopt if no element satisfies {predicate}.
template <typename C, typename P>
std::optional<size_t> index_of_if(const C& container, const P& predicate) {
const auto b = begin(container);
const auto e = end(container);
const auto b = std::begin(container);
const auto e = std::end(container);
if (auto it = std::find_if(b, e, predicate); it != e) {
return {std::distance(b, it)};
}
@ -49,9 +49,9 @@ std::optional<size_t> index_of_if(const C& container, const P& predicate) {
template <typename C>
inline size_t erase_at(C& container, size_t index, size_t count = 1) {
// TODO(C++20): Replace with std::erase.
if (size(container) <= index) return 0;
auto start = begin(container) + index;
count = std::min<size_t>(count, std::distance(start, end(container)));
if (std::size(container) <= index) return 0;
auto start = std::begin(container) + index;
count = std::min<size_t>(count, std::distance(start, std::end(container)));
container.erase(start, start + count);
return count;
}
@ -71,28 +71,38 @@ inline size_t erase_if(C& container, const P& predicate) {
// Helper for std::count_if.
template <typename C, typename P>
inline size_t count_if(const C& container, const P& predicate) {
return std::count_if(begin(container), end(container), predicate);
return std::count_if(std::begin(container), std::end(container), predicate);
}
// Helper for std::all_of.
template <typename C, typename P>
inline bool all_of(const C& container, const P& predicate) {
return std::all_of(begin(container), end(container), predicate);
return std::all_of(std::begin(container), std::end(container), predicate);
}
// Helper for std::none_of.
template <typename C, typename P>
inline bool none_of(const C& container, const P& predicate) {
return std::none_of(begin(container), end(container), predicate);
return std::none_of(std::begin(container), std::end(container), predicate);
}
// Helper for std::sort.
template <typename C>
inline void sort(C& container) {
std::sort(std::begin(container), std::end(container));
}
template <typename C, typename Comp>
inline void sort(C& container, Comp comp) {
std::sort(std::begin(container), std::end(container), comp);
}
// Returns true iff all elements of {container} compare equal using operator==.
template <typename C>
inline bool all_equal(const C& container) {
if (size(container) <= 1) return true;
auto b = begin(container);
if (std::size(container) <= 1) return true;
auto b = std::begin(container);
const auto& value = *b;
return std::all_of(++b, end(container),
return std::all_of(++b, std::end(container),
[&](const auto& v) { return v == value; });
}
@ -100,15 +110,15 @@ inline bool all_equal(const C& container) {
// operator==.
template <typename C, typename T>
inline bool all_equal(const C& container, const T& value) {
return std::all_of(begin(container), end(container),
return std::all_of(std::begin(container), std::end(container),
[&](const auto& v) { return v == value; });
}
// Appends to vector {v} all the elements in the range {begin(container)} and
// {end(container)}.
template <typename T, typename A, typename C>
inline void vector_append(std::vector<T, A>& v, const C& container) {
v.insert(end(v), begin(container), end(container));
// Appends to vector {v} all the elements in the range {std::begin(container)}
// and {std::end(container)}.
template <typename V, typename C>
inline void vector_append(V& v, const C& container) {
v.insert(std::end(v), std::begin(container), std::end(container));
}
} // namespace v8::base

View File

@ -110,6 +110,15 @@ class SmallVector {
bool empty() const { return end_ == begin_; }
size_t capacity() const { return end_of_storage_ - begin_; }
T& front() {
DCHECK_NE(0, size());
return begin_[0];
}
const T& front() const {
DCHECK_NE(0, size());
return begin_[0];
}
T& back() {
DCHECK_NE(0, size());
return end_[-1];
@ -146,6 +155,30 @@ class SmallVector {
end_ -= count;
}
T* insert(T* pos, const T& value) { return insert(pos, 1, value); }
T* insert(T* pos, size_t count, const T& value) {
DCHECK_LE(pos, end_);
size_t offset = pos - begin_;
size_t elements_to_move = end_ - pos;
resize_no_init(size() + count);
pos = begin_ + offset;
std::memmove(pos + count, pos, elements_to_move);
std::fill_n(pos, count, value);
return pos;
}
template <typename It>
T* insert(T* pos, It begin, It end) {
DCHECK_LE(pos, end_);
size_t offset = pos - begin_;
size_t count = std::distance(begin, end);
size_t elements_to_move = end_ - pos;
resize_no_init(size() + count);
pos = begin_ + offset;
std::memmove(pos + count, pos, elements_to_move);
std::copy(begin, end, pos);
return pos;
}
void resize_no_init(size_t new_size) {
// Resizing without initialization is safe if T is trivially copyable.
ASSERT_TRIVIALLY_COPYABLE(T);

View File

@ -263,6 +263,9 @@ Convert<int32, intptr>(i: intptr): int32 {
Convert<int32, int64>(i: int64): int32 {
return TruncateInt64ToInt32(i);
}
Convert<uint32, uint64>(i: uint64): uint32 {
return Unsigned(TruncateInt64ToInt32(Signed(i)));
}
Convert<int32, Number>(n: Number): int32 {
typeswitch (n) {
case (s: Smi): {

View File

@ -119,6 +119,11 @@ struct Slice<T: type, Reference: type> {
return this.TryAtIndex(i) otherwise unreachable;
}
macro AtIndex(index: uint32): Reference {
const i: intptr = Convert<intptr>(index);
return this.TryAtIndex(i) otherwise unreachable;
}
macro Iterator(): SliceIterator<T, Reference> {
const end = this.offset + TimesSizeOf<T>(this.length);
return SliceIterator<T, Reference>{

View File

@ -317,6 +317,20 @@ constexpr auto NewHeapNumberDescriptor::registers() {
return RegisterArray(ecx);
}
// static
constexpr auto CheckTurboshaftFloat32TypeDescriptor::registers() {
// Work around using eax, whose register code is 0, and leads to the FP
// parameter being passed via xmm0, which is not allocatable on ia32.
return RegisterArray(ecx);
}
// static
constexpr auto CheckTurboshaftFloat64TypeDescriptor::registers() {
// Work around using eax, whose register code is 0, and leads to the FP
// parameter being passed via xmm0, which is not allocatable on ia32.
return RegisterArray(ecx);
}
} // namespace internal
} // namespace v8

View File

@ -2120,6 +2120,40 @@ class UnaryOp_BaselineDescriptor
DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor)
};
class CheckTurboshaftFloat32TypeDescriptor
: public StaticCallInterfaceDescriptor<
CheckTurboshaftFloat32TypeDescriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(1, kValue, kExpectedType, kNodeId)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(),
MachineTypeOf<Float32T>::value,
MachineType::TaggedPointer(),
MachineType::TaggedSigned())
DECLARE_DEFAULT_DESCRIPTOR(CheckTurboshaftFloat32TypeDescriptor)
#if V8_TARGET_ARCH_IA32
// We need a custom descriptor on ia32 to avoid using xmm0.
static constexpr inline auto registers();
#endif
};
class CheckTurboshaftFloat64TypeDescriptor
: public StaticCallInterfaceDescriptor<
CheckTurboshaftFloat64TypeDescriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(1, kValue, kExpectedType, kNodeId)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(),
MachineTypeOf<Float64T>::value,
MachineType::TaggedPointer(),
MachineType::TaggedSigned())
DECLARE_DEFAULT_DESCRIPTOR(CheckTurboshaftFloat64TypeDescriptor)
#if V8_TARGET_ARCH_IA32
// We need a custom descriptor on ia32 to avoid using xmm0.
static constexpr inline auto registers();
#endif
};
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor \
: public StaticCallInterfaceDescriptor<Name##Descriptor> { \

View File

@ -80,6 +80,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/assert-types-reducer.h"
#include "src/compiler/turboshaft/branch-elimination-reducer.h"
#include "src/compiler/turboshaft/decompression-optimization.h"
#include "src/compiler/turboshaft/graph-builder.h"
@ -92,6 +93,8 @@
#include "src/compiler/turboshaft/recreate-schedule.h"
#include "src/compiler/turboshaft/select-lowering-reducer.h"
#include "src/compiler/turboshaft/simplify-tf-loops.h"
#include "src/compiler/turboshaft/type-inference-reducer.h"
#include "src/compiler/turboshaft/types.h"
#include "src/compiler/turboshaft/value-numbering-reducer.h"
#include "src/compiler/turboshaft/variable-reducer.h"
#include "src/compiler/type-narrowing-reducer.h"
@ -2079,8 +2082,9 @@ struct BuildTurboshaftPhase {
data->reset_schedule();
data->CreateTurboshaftGraph();
if (auto bailout = turboshaft::BuildGraph(
schedule, data->graph_zone(), temp_zone, &data->turboshaft_graph(),
linkage, data->source_positions(), data->node_origins())) {
schedule, data->isolate(), data->graph_zone(), temp_zone,
&data->turboshaft_graph(), linkage, data->source_positions(),
data->node_origins())) {
return bailout;
}
return {};
@ -2104,6 +2108,28 @@ struct OptimizeTurboshaftPhase {
}
};
struct TurboshaftTypeInferencePhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftTypeInference)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->HasTurboshaftGraph());
UnparkedScopeIfNeeded scope(data->broker());
if (v8_flags.turboshaft_assert_types) {
turboshaft::OptimizationPhase<turboshaft::AssertTypesReducer,
turboshaft::ValueNumberingReducer,
turboshaft::TypeInferenceReducer>::
Run(&data->turboshaft_graph(), temp_zone, data->node_origins(),
std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()},
turboshaft::AssertTypesReducerArgs{data->isolate()}});
} else {
turboshaft::OptimizationPhase<turboshaft::TypeInferenceReducer>::Run(
&data->turboshaft_graph(), temp_zone, data->node_origins(),
std::tuple{turboshaft::TypeInferenceReducerArgs{data->isolate()}});
}
}
};
struct TurboshaftRecreateSchedulePhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftRecreateSchedule)
@ -2682,6 +2708,17 @@ struct PrintTurboshaftGraphPhase {
op.PrintOptions(stream);
return true;
});
PrintTurboshaftCustomDataPerOperation(
data->info(), "Types", data->turboshaft_graph(),
[](std::ostream& stream, const turboshaft::Graph& graph,
turboshaft::OpIndex index) -> bool {
turboshaft::Type type = graph.operation_types()[index];
if (!type.IsInvalid() && !type.IsNone()) {
type.PrintTo(stream);
return true;
}
return false;
});
}
if (data->info()->trace_turbo_graph()) {
@ -3060,6 +3097,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<PrintTurboshaftGraphPhase>(
DecompressionOptimizationPhase::phase_name());
Run<TurboshaftTypeInferencePhase>();
Run<PrintTurboshaftGraphPhase>(TurboshaftTypeInferencePhase::phase_name());
Run<TurboshaftRecreateSchedulePhase>(linkage);
TraceSchedule(data->info(), data, data->schedule(),
TurboshaftRecreateSchedulePhase::phase_name());

View File

@ -15,6 +15,7 @@
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/codegen/callable.h"
#include "src/codegen/reloc-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/turboshaft/graph.h"
@ -57,6 +58,16 @@ class ReducerStack<Assembler> {
Assembler& Asm() { return *static_cast<Assembler*>(this); }
};
template <typename Next>
class ReducerBase;
template <typename Next>
struct next_is_bottom_of_assembler_stack
: public std::integral_constant<bool, false> {};
template <typename A>
struct next_is_bottom_of_assembler_stack<ReducerStack<A, ReducerBase>>
: public std::integral_constant<bool, true> {};
// LABEL_BLOCK is used in Reducers to have a single call forwarding to the next
// reducer without change. A typical use would be:
//
@ -972,6 +983,26 @@ class AssemblerOpInterface {
return stack().Bind(if_true);
}
OpIndex CallBuiltin(Builtin builtin, OpIndex frame_state,
const base::Vector<OpIndex>& arguments,
Isolate* isolate) {
Callable const callable = Builtins::CallableFor(isolate, builtin);
Zone* graph_zone = stack().output_graph().graph_zone();
const CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
graph_zone, callable.descriptor(),
callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoThrow | Operator::kNoDeopt);
DCHECK_EQ(call_descriptor->NeedsFrameState(), frame_state.valid());
const TSCallDescriptor* ts_call_descriptor =
TSCallDescriptor::Create(call_descriptor, graph_zone);
OpIndex callee = stack().HeapConstant(callable.code());
return stack().Call(callee, frame_state, arguments, ts_call_descriptor);
}
private:
Assembler& stack() { return *static_cast<Assembler*>(this); }
};

View File

@ -0,0 +1,214 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_ASSERT_TYPES_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_ASSERT_TYPES_REDUCER_H_
#include <limits>
#include "src/base/logging.h"
#include "src/base/template-utils.h"
#include "src/base/vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/representations.h"
#include "src/compiler/turboshaft/sidetable.h"
#include "src/compiler/turboshaft/types.h"
#include "src/heap/parked-scope.h"
namespace v8::internal::compiler::turboshaft {
class DetectReentranceScope {
public:
explicit DetectReentranceScope(bool* flag)
: is_reentrant_(*flag), flag_(flag) {
*flag_ = true;
}
~DetectReentranceScope() { *flag_ = is_reentrant_; }
bool IsReentrant() const { return is_reentrant_; }
private:
bool is_reentrant_;
bool* flag_;
};
struct AssertTypesReducerArgs {
Isolate* isolate;
};
template <class Next>
class AssertTypesReducer : public Next {
public:
using Next::Asm;
using ArgT =
base::append_tuple_type<typename Next::ArgT, AssertTypesReducerArgs>;
template <typename... Args>
explicit AssertTypesReducer(const std::tuple<Args...>& args)
: Next(args), isolate_(std::get<AssertTypesReducerArgs>(args).isolate) {}
uint32_t NoContextConstant() { return IntToSmi(Context::kNoContext); }
OpIndex ReducePhi(base::Vector<const OpIndex> inputs,
RegisterRepresentation rep) {
OpIndex index = Next::ReducePhi(inputs, rep);
if (!index.valid()) return index;
Type type = TypeOf(index);
if (type.IsInvalid()) return index;
// For now allow Type::Any().
if (type.IsAny()) return index;
DetectReentranceScope reentrance_scope(&emitting_asserts_);
DCHECK(!reentrance_scope.IsReentrant());
InsertTypeAssert(rep, index, type);
return index;
}
OpIndex ReduceConstant(ConstantOp::Kind kind, ConstantOp::Storage value) {
OpIndex index = Next::ReduceConstant(kind, value);
if (!index.valid()) return index;
Type type = TypeOf(index);
if (type.IsInvalid()) return index;
DetectReentranceScope reentrance_scope(&emitting_asserts_);
if (reentrance_scope.IsReentrant()) return index;
RegisterRepresentation rep = ConstantOp::Representation(kind);
switch (kind) {
case ConstantOp::Kind::kWord32:
case ConstantOp::Kind::kWord64:
case ConstantOp::Kind::kFloat32:
case ConstantOp::Kind::kFloat64:
InsertTypeAssert(rep, index, type);
break;
case ConstantOp::Kind::kNumber:
case ConstantOp::Kind::kTaggedIndex:
case ConstantOp::Kind::kExternal:
case ConstantOp::Kind::kHeapObject:
case ConstantOp::Kind::kCompressedHeapObject:
case ConstantOp::Kind::kRelocatableWasmCall:
case ConstantOp::Kind::kRelocatableWasmStubCall:
// TODO(nicohartmann@): Support remaining {kind}s.
UNIMPLEMENTED();
}
return index;
}
OpIndex ReduceWordBinop(OpIndex left, OpIndex right, WordBinopOp::Kind kind,
WordRepresentation rep) {
OpIndex index = Next::ReduceWordBinop(left, right, kind, rep);
if (!index.valid()) return index;
Type type = TypeOf(index);
if (type.IsInvalid()) return index;
DetectReentranceScope reentrance_scope(&emitting_asserts_);
DCHECK(!reentrance_scope.IsReentrant());
InsertTypeAssert(rep, index, type);
return index;
}
OpIndex ReduceFloatBinop(OpIndex left, OpIndex right, FloatBinopOp::Kind kind,
FloatRepresentation rep) {
OpIndex index = Next::ReduceFloatBinop(left, right, kind, rep);
if (!index.valid()) return index;
Type type = TypeOf(index);
if (type.IsInvalid()) return index;
DetectReentranceScope reentrance_scope(&emitting_asserts_);
DCHECK(!reentrance_scope.IsReentrant());
InsertTypeAssert(rep, index, type);
return index;
}
void InsertTypeAssert(RegisterRepresentation rep, OpIndex value,
const Type& type) {
DCHECK(!type.IsInvalid());
if (type.IsNone()) {
Asm().Unreachable();
return;
}
auto GenerateBuiltinCall =
[this](Builtin builtin, OpIndex original_value,
base::SmallVector<OpIndex, 6> actual_value_indices,
const Type& type) {
uint32_t op_id = static_cast<uint32_t>(IntToSmi(original_value.id()));
// Add expected type and operation id.
Handle<TurboshaftType> expected_type = type.AllocateOnHeap(factory());
actual_value_indices.push_back(Asm().HeapConstant(expected_type));
actual_value_indices.push_back(Asm().Word32Constant(op_id));
actual_value_indices.push_back(
Asm().Word32Constant(NoContextConstant()));
Asm().CallBuiltin(
builtin, OpIndex::Invalid(),
{actual_value_indices.data(), actual_value_indices.size()},
isolate_);
// Used for debugging
// PrintF("Inserted assert for %3d:%-40s (%s)\n", original_value.id(),
// Asm().output_graph().Get(original_value).ToString().c_str(),
// type.ToString().c_str());
};
switch (rep.value()) {
case RegisterRepresentation::Word32(): {
DCHECK(type.IsWord32());
base::SmallVector<OpIndex, 6> actual_value_indices = {value};
GenerateBuiltinCall(Builtin::kCheckTurboshaftWord32Type, value,
std::move(actual_value_indices), type);
break;
}
case RegisterRepresentation::Word64(): {
DCHECK(type.IsWord64());
OpIndex value_high = Asm().Word64ShiftRightLogical(
value, Asm().Word64Constant(static_cast<uint64_t>(32)));
OpIndex value_low = value; // Use implicit truncation to word32.
base::SmallVector<OpIndex, 6> actual_value_indices = {value_high,
value_low};
GenerateBuiltinCall(Builtin::kCheckTurboshaftWord64Type, value,
std::move(actual_value_indices), type);
break;
}
case RegisterRepresentation::Float32(): {
DCHECK(type.IsFloat32());
base::SmallVector<OpIndex, 6> actual_value_indices = {value};
GenerateBuiltinCall(Builtin::kCheckTurboshaftFloat32Type, value,
std::move(actual_value_indices), type);
break;
}
case RegisterRepresentation::Float64(): {
DCHECK(type.IsFloat64());
base::SmallVector<OpIndex, 6> actual_value_indices = {value};
GenerateBuiltinCall(Builtin::kCheckTurboshaftFloat64Type, value,
std::move(actual_value_indices), type);
break;
}
case RegisterRepresentation::Tagged():
case RegisterRepresentation::Compressed():
// TODO(nicohartmann@): Handle remaining cases.
break;
}
}
Type TypeOf(const OpIndex index) {
return Asm().output_graph().operation_types()[index];
}
private:
Factory* factory() { return isolate_->factory(); }
Isolate* isolate_;
bool emitting_asserts_ = false;
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_ASSERT_TYPES_REDUCER_H_

View File

@ -26,7 +26,7 @@ V8_INLINE size_t fast_hash_combine(T const& v, Ts const&... vs);
template <class T>
struct fast_hash {
size_t operator()(const T& v) {
size_t operator()(const T& v) const {
if constexpr (std::is_enum<T>::value) {
return static_cast<size_t>(v);
} else {
@ -37,12 +37,13 @@ struct fast_hash {
template <class... Ts>
struct fast_hash<std::tuple<Ts...>> {
size_t operator()(const std::tuple<Ts...>& v) {
size_t operator()(const std::tuple<Ts...>& v) const {
return impl(v, std::make_index_sequence<sizeof...(Ts)>());
}
template <size_t... I>
V8_INLINE size_t impl(std::tuple<Ts...> const& v, std::index_sequence<I...>) {
V8_INLINE size_t impl(std::tuple<Ts...> const& v,
std::index_sequence<I...>) const {
return fast_hash_combine(std::get<I>(v)...);
}
};
@ -63,7 +64,7 @@ V8_INLINE size_t fast_hash_range(Iterator first, Iterator last) {
template <typename T>
struct fast_hash<base::Vector<T>> {
V8_INLINE size_t operator()(base::Vector<T> v) {
V8_INLINE size_t operator()(base::Vector<T> v) const {
return fast_hash_range(v.begin(), v.end());
}
};

View File

@ -941,10 +941,10 @@ OpIndex GraphBuilder::Process(
OpIndex value = Map(node->InputAt(1));
FieldAccess const& access = FieldAccessOf(node->op());
// External pointer must never be stored by optimized code.
DCHECK(!access.type.Is(Type::ExternalPointer()) ||
DCHECK(!access.type.Is(compiler::Type::ExternalPointer()) ||
!V8_ENABLE_SANDBOX_BOOL);
// SandboxedPointers are not currently stored by optimized code.
DCHECK(!access.type.Is(Type::SandboxedPointer()));
DCHECK(!access.type.Is(compiler::Type::SandboxedPointer()));
#ifdef V8_ENABLE_SANDBOX
if (access.is_bounded_size_access) {
@ -991,7 +991,8 @@ OpIndex GraphBuilder::Process(
MemoryRepresentation rep =
MemoryRepresentation::FromMachineType(machine_type);
#ifdef V8_ENABLE_SANDBOX
bool is_sandboxed_external = access.type.Is(Type::ExternalPointer());
bool is_sandboxed_external =
access.type.Is(compiler::Type::ExternalPointer());
if (is_sandboxed_external) {
// Fields for sandboxed external pointer contain a 32-bit handle, not a
// 64-bit raw pointer.
@ -1032,9 +1033,9 @@ OpIndex GraphBuilder::Process(
} // namespace
base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
Zone* phase_zone, Graph* graph,
Linkage* linkage,
base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Isolate* isolate,
Zone* graph_zone, Zone* phase_zone,
Graph* graph, Linkage* linkage,
SourcePositionTable* source_positions,
NodeOriginTable* origins) {
GraphBuilder builder{

View File

@ -14,9 +14,9 @@ class Schedule;
class SourcePositionTable;
}
namespace v8::internal::compiler::turboshaft {
base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
Zone* phase_zone, Graph* graph,
Linkage* linkage,
base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Isolate* isolate,
Zone* graph_zone, Zone* phase_zone,
Graph* graph, Linkage* linkage,
SourcePositionTable* source_positions,
NodeOriginTable* origins);
}

View File

@ -17,6 +17,7 @@
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/sidetable.h"
#include "src/compiler/turboshaft/types.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
@ -427,7 +428,8 @@ class Graph {
all_blocks_(graph_zone),
graph_zone_(graph_zone),
source_positions_(graph_zone),
operation_origins_(graph_zone) {}
operation_origins_(graph_zone),
operation_types_(graph_zone) {}
// Reset the graph to recycle its memory.
void Reset() {
@ -435,6 +437,7 @@ class Graph {
bound_blocks_.clear();
source_positions_.Reset();
operation_origins_.Reset();
operation_types_.Reset();
next_block_ = 0;
dominator_tree_depth_ = 0;
}
@ -723,6 +726,10 @@ class Graph {
GrowingSidetable<OpIndex>& operation_origins() { return operation_origins_; }
uint32_t DominatorTreeDepth() const { return dominator_tree_depth_; }
const GrowingSidetable<Type>& operation_types() const {
return operation_types_;
}
GrowingSidetable<Type>& operation_types() { return operation_types_; }
Graph& GetOrCreateCompanion() {
if (!companion_) {
@ -745,6 +752,7 @@ class Graph {
std::swap(graph_zone_, companion.graph_zone_);
std::swap(source_positions_, companion.source_positions_);
std::swap(operation_origins_, companion.operation_origins_);
std::swap(operation_types_, companion.operation_types_);
#ifdef DEBUG
// Update generation index.
DCHECK_EQ(generation_ + 1, companion.generation_);
@ -815,6 +823,7 @@ class Graph {
GrowingSidetable<SourcePosition> source_positions_;
GrowingSidetable<OpIndex> operation_origins_;
uint32_t dominator_tree_depth_ = 0;
GrowingSidetable<Type> operation_types_;
std::unique_ptr<Graph> companion_ = {};
#ifdef DEBUG

View File

@ -77,7 +77,7 @@ std::ostream& operator<<(std::ostream& os, OpIndex idx);
template <>
struct fast_hash<OpIndex> {
V8_INLINE size_t operator()(OpIndex op) { return op.id(); }
V8_INLINE size_t operator()(OpIndex op) const { return op.id(); }
};
V8_INLINE size_t hash_value(OpIndex op) { return base::hash_value(op.id()); }
@ -108,7 +108,7 @@ class BlockIndex {
template <>
struct fast_hash<BlockIndex> {
V8_INLINE size_t operator()(BlockIndex op) { return op.id(); }
V8_INLINE size_t operator()(BlockIndex op) const { return op.id(); }
};
V8_INLINE size_t hash_value(BlockIndex op) { return base::hash_value(op.id()); }

View File

@ -883,6 +883,16 @@ struct ComparisonOp : FixedArityOperationT<2, ComparisonOp> {
}
auto options() const { return std::tuple{kind, rep}; }
static bool IsLessThan(Kind kind) {
switch (kind) {
case Kind::kSignedLessThan:
case Kind::kUnsignedLessThan:
return true;
case Kind::kSignedLessThanOrEqual:
case Kind::kUnsignedLessThanOrEqual:
return false;
}
}
static bool IsSigned(Kind kind) {
switch (kind) {
case Kind::kSignedLessThan:

View File

@ -22,13 +22,18 @@ namespace v8::internal::compiler::turboshaft {
// This sidetable is a conceptually infinite mapping from Turboshaft operation
// indices to values. It grows automatically and default-initializes the table
// when accessed out-of-bounds.
template <class T>
template <class T, class Key = OpIndex>
class GrowingSidetable {
public:
static_assert(std::is_same_v<Key, OpIndex> ||
std::is_same_v<Key, BlockIndex>);
explicit GrowingSidetable(Zone* zone) : table_(zone) {}
T& operator[](OpIndex op) {
size_t i = op.id();
GrowingSidetable(size_t size, const T& initial_value, Zone* zone)
: table_(size, initial_value, zone) {}
T& operator[](Key index) {
size_t i = index.id();
if (V8_UNLIKELY(i >= table_.size())) {
table_.resize(NextSize(i));
// Make sure we also get access to potential over-allocation by
@ -38,8 +43,8 @@ class GrowingSidetable {
return table_[i];
}
const T& operator[](OpIndex op) const {
size_t i = op.id();
const T& operator[](Key index) const {
size_t i = index.id();
if (V8_UNLIKELY(i >= table_.size())) {
table_.resize(NextSize(i));
// Make sure we also get access to potential over-allocation by
@ -89,6 +94,11 @@ class FixedSidetable {
ZoneVector<T> table_;
};
template <typename T>
using GrowingBlockSidetable = GrowingSidetable<T, BlockIndex>;
template <typename T>
using FixedBlockSidetable = FixedSidetable<T, BlockIndex>;
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_SIDETABLE_H_

View File

@ -0,0 +1,861 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_REDUCER_H_
#define V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_REDUCER_H_
#include <limits>
#include "src/base/logging.h"
#include "src/base/vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/representations.h"
#include "src/compiler/turboshaft/sidetable.h"
#include "src/compiler/turboshaft/snapshot-table.h"
#include "src/compiler/turboshaft/types.h"
// #define TRACE_TYPING(...) PrintF(__VA_ARGS__)
#define TRACE_TYPING(...) ((void)0)
namespace v8::internal::compiler::turboshaft {
namespace {
// Returns the smallest element of {a}, skipping NaN entries. At least one
// element must be non-NaN. A result of -0 is normalized to +0.
template <typename T, size_t N>
T array_min(const std::array<T, N>& a) {
  DCHECK_NE(0, N);
  T result = +std::numeric_limits<T>::infinity();
  for (const T value : a) {
    if (std::isnan(value)) continue;
    result = std::min(value, result);
  }
  DCHECK(!std::isnan(result));
  if (result == T{0}) return T{0};  // Normalize -0 to +0.
  return result;
}
// Returns the largest element of {a}, skipping NaN entries. At least one
// element must be non-NaN. A result of -0 is normalized to +0.
template <typename T, size_t N>
T array_max(const std::array<T, N>& a) {
  DCHECK_NE(0, N);
  T result = -std::numeric_limits<T>::infinity();
  for (const T value : a) {
    if (std::isnan(value)) continue;
    result = std::max(value, result);
  }
  DCHECK(!std::isnan(result));
  if (result == T{0}) return T{0};  // Normalize -0 to +0.
  return result;
}
} // namespace
// Computes types for word (unsigned integer) operations of width {Bits}.
// Word types are either small explicit sets of values or ranges [from, to].
// A range with from > to is "wrapping" and denotes [from, max] u [0, to].
template <size_t Bits>
struct WordOperationTyper {
  static_assert(Bits == 32 || Bits == 64);
  using word_t = uint_type<Bits>;
  using type_t = WordType<Bits>;
  using ElementsVector = base::SmallVector<word_t, type_t::kMaxSetSize * 2>;

  // Normalizes {elements} (sort + deduplicate) and returns a set type if it
  // is small enough, otherwise the tightest (possibly wrapping) range that
  // covers all elements.
  static type_t FromElements(ElementsVector elements, Zone* zone) {
    base::sort(elements);
    auto it = std::unique(elements.begin(), elements.end());
    elements.pop_back(std::distance(it, elements.end()));
    DCHECK(!elements.empty());
    if (elements.size() <= type_t::kMaxSetSize) {
      return type_t::Set(elements, zone);
    }
    auto range =
        MakeRange(base::Vector<const word_t>{elements.data(), elements.size()});
    auto result = type_t::Range(range.first, range.second, zone);
    DCHECK(
        base::all_of(elements, [&](word_t e) { return result.Contains(e); }));
    return result;
  }

  // Returns {t}'s bounds as a (from, to) pair; for a set, the tightest range
  // covering the set is constructed.
  static std::pair<word_t, word_t> MakeRange(const type_t& t) {
    if (t.is_range()) return t.range();
    DCHECK(t.is_set());
    return MakeRange(t.set_elements());
  }

  // Computes the tightest range covering the sorted, unique {elements}. If
  // they span more than half of the value space, a wrapping range is built
  // that excludes the largest gap between neighboring elements.
  static std::pair<word_t, word_t> MakeRange(
      const base::Vector<const word_t>& elements) {
    DCHECK(!elements.empty());
    DCHECK(detail::is_unique_and_sorted(elements));
    if (elements[elements.size() - 1] - elements[0] <=
        std::numeric_limits<word_t>::max() / 2) {
      // Construct a non-wrapping range.
      return {elements[0], elements[elements.size() - 1]};
    }
    // Construct a wrapping range: move {to_index} up and {from_index} down
    // towards the largest gap between neighboring elements, always advancing
    // the side with the smaller adjacent gap.
    size_t from_index = elements.size() - 1;
    size_t to_index = 0;
    while (to_index + 1 < from_index) {
      if ((elements[to_index + 1] - elements[to_index]) <
          (elements[from_index] - elements[from_index - 1])) {
        ++to_index;
      } else {
        // Bug fix: {from_index} must move down towards {to_index};
        // incrementing it would read past the end of {elements} and the loop
        // would never terminate regularly.
        --from_index;
      }
    }
    return {elements[from_index], elements[to_index]};
  }

  // Number of values covered by {range} minus one (works for wrapping and
  // non-wrapping ranges).
  static word_t distance(const std::pair<word_t, word_t>& range) {
    return is_wrapping(range) ? (std::numeric_limits<word_t>::max() -
                                 range.first + range.second)
                              : range.second - range.first;
  }

  static bool is_wrapping(const std::pair<word_t, word_t>& range) {
    return range.first > range.second;
  }

  // Types the (modular) addition of values of types {lhs} and {rhs}.
  static Type Add(const type_t& lhs, const type_t& rhs, Zone* zone) {
    if (lhs.is_any() || rhs.is_any()) return type_t::Any();

    // If both sides are decently small sets, we produce the product set.
    if (lhs.is_set() && rhs.is_set()) {
      ElementsVector result_elements;
      for (int i = 0; i < lhs.set_size(); ++i) {
        for (int j = 0; j < rhs.set_size(); ++j) {
          result_elements.push_back(lhs.set_element(i) + rhs.set_element(j));
        }
      }
      return FromElements(std::move(result_elements), zone);
    }

    // Otherwise just construct a range.
    std::pair<word_t, word_t> x = MakeRange(lhs);
    std::pair<word_t, word_t> y = MakeRange(rhs);

    // If the result would not be a complete range, we compute it.
    // Check: (lhs.to + rhs.to + 1) - (lhs.from + rhs.from + 1) < max
    // =====> (lhs.to - lhs.from) + (rhs.to - rhs.from) < max
    // =====> (lhs.to - lhs.from) < max - (rhs.to - rhs.from)
    if (distance(x) < std::numeric_limits<word_t>::max() - distance(y)) {
      return type_t::Range(x.first + y.first, x.second + y.second, zone);
    }

    return type_t::Any();
  }

  // Types the (modular) subtraction of values of types {lhs} and {rhs}.
  static Type Subtract(const type_t& lhs, const type_t& rhs, Zone* zone) {
    if (lhs.is_any() || rhs.is_any()) return type_t::Any();

    // If both sides are decently small sets, we produce the product set.
    if (lhs.is_set() && rhs.is_set()) {
      ElementsVector result_elements;
      for (int i = 0; i < lhs.set_size(); ++i) {
        for (int j = 0; j < rhs.set_size(); ++j) {
          result_elements.push_back(lhs.set_element(i) - rhs.set_element(j));
        }
      }
      return FromElements(std::move(result_elements), zone);
    }

    // Otherwise just construct a range.
    std::pair<word_t, word_t> x = MakeRange(lhs);
    std::pair<word_t, word_t> y = MakeRange(rhs);

    if (is_wrapping(x) && is_wrapping(y)) {
      return type_t::Range(x.first - y.second, x.second - y.first, zone);
    }

    // TODO(nicohartmann@): Improve the wrapping cases.
    return type_t::Any();
  }
};
// Computes types for floating point operations of width {Bits} (32 or 64).
// Float types are ranges or small sets, optionally tagged with a NaN bit.
template <size_t Bits>
struct FloatOperationTyper {
  static_assert(Bits == 32 || Bits == 64);
  using float_t = std::conditional_t<Bits == 32, float, double>;
  using type_t = FloatType<Bits>;
  static constexpr int kSetThreshold = type_t::kMaxSetSize;

  // Builds a range type [min, max]; a degenerate range collapses into a
  // singleton set. {maybe_nan} adds the NaN special value.
  static type_t Range(float_t min, float_t max, bool maybe_nan, Zone* zone) {
    DCHECK_LE(min, max);
    if (min == max) return Set({min}, maybe_nan, zone);
    return type_t::Range(
        min, max, maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
  }

  // Builds a set type from {elements}; duplicates are removed and NaN
  // entries are stripped out and folded into the NaN bit instead.
  static type_t Set(std::vector<float_t> elements, bool maybe_nan, Zone* zone) {
    base::sort(elements);
    elements.erase(std::unique(elements.begin(), elements.end()),
                   elements.end());
    if (base::erase_if(elements, [](float_t v) { return std::isnan(v); }) > 0) {
      maybe_nan = true;
    }
    return type_t::Set(
        elements, maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
  }

  // Tries to construct the product of two sets where values are generated using
  // {combine}. Returns Type::Invalid() if a set cannot be constructed (e.g.
  // because the result exceeds the maximal number of set elements).
  static Type ProductSet(const type_t& l, const type_t& r, bool maybe_nan,
                         Zone* zone,
                         std::function<float_t(float_t, float_t)> combine) {
    DCHECK(l.is_set());
    DCHECK(r.is_set());
    std::vector<float_t> results;
    for (int i = 0; i < l.set_size(); ++i) {
      for (int j = 0; j < r.set_size(); ++j) {
        results.push_back(combine(l.set_element(i), r.set_element(j)));
      }
    }
    // NaN results are folded into the NaN bit rather than kept as elements.
    maybe_nan = (base::erase_if(results,
                                [](float_t v) { return std::isnan(v); }) > 0) ||
                maybe_nan;
    base::sort(results);
    auto it = std::unique(results.begin(), results.end());
    if (std::distance(results.begin(), it) > kSetThreshold)
      return Type::Invalid();
    results.erase(it, results.end());
    return Set(std::move(results),
               maybe_nan ? type_t::kNaN : type_t::kNoSpecialValues, zone);
  }

  // Types the addition of values of types {l} and {r}.
  static Type Add(const type_t& l, const type_t& r, Zone* zone) {
    if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
    bool maybe_nan = l.has_nan() || r.has_nan();

    // If both sides are decently small sets, we produce the product set.
    auto combine = [](float_t a, float_t b) { return a + b; };
    if (l.is_set() && r.is_set()) {
      auto result = ProductSet(l, r, maybe_nan, zone, combine);
      if (!result.IsInvalid()) return result;
    }

    // Otherwise just construct a range. All four corner combinations are
    // evaluated because signs of the bounds may differ.
    auto [l_min, l_max] = l.minmax();
    auto [r_min, r_max] = r.minmax();

    std::array<float_t, 4> results;
    results[0] = l_min + r_min;
    results[1] = l_min + r_max;
    results[2] = l_max + r_min;
    results[3] = l_max + r_max;

    int nans = 0;
    for (int i = 0; i < 4; ++i) {
      if (std::isnan(results[i])) ++nans;
    }
    if (nans >= 4) {
      // All combinations of inputs produce NaN.
      return type_t::NaN();
    }
    maybe_nan = maybe_nan || nans > 0;
    const float_t result_min = array_min(results);
    const float_t result_max = array_max(results);
    return Range(result_min, result_max, maybe_nan, zone);
  }

  // Types the subtraction of values of types {l} and {r}.
  static Type Subtract(const type_t& l, const type_t& r, Zone* zone) {
    if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN();
    bool maybe_nan = l.has_nan() || r.has_nan();

    // If both sides are decently small sets, we produce the product set.
    auto combine = [](float_t a, float_t b) { return a - b; };
    if (l.is_set() && r.is_set()) {
      auto result = ProductSet(l, r, maybe_nan, zone, combine);
      if (!result.IsInvalid()) return result;
    }

    // Otherwise just construct a range from the four corner combinations.
    auto [l_min, l_max] = l.minmax();
    auto [r_min, r_max] = r.minmax();

    std::array<float_t, 4> results;
    results[0] = l_min - r_min;
    results[1] = l_min - r_max;
    results[2] = l_max - r_min;
    results[3] = l_max - r_max;

    int nans = 0;
    for (int i = 0; i < 4; ++i) {
      if (std::isnan(results[i])) ++nans;
    }
    if (nans >= 4) {
      // All combinations of inputs produce NaN.
      return type_t::NaN();
    }
    maybe_nan = maybe_nan || nans > 0;
    const float_t result_min = array_min(results);
    const float_t result_max = array_max(results);
    return Range(result_min, result_max, maybe_nan, zone);
  }
};
// Static typing rules for Turboshaft operations: each Type* method computes
// an (over-approximating) output type from the operand types. {zone} is used
// for allocating out-of-line type payloads.
class Typer {
 public:
  // Types a constant; unsupported constant kinds yield Type::Invalid().
  static Type TypeConstant(ConstantOp::Kind kind, ConstantOp::Storage value) {
    switch (kind) {
      case ConstantOp::Kind::kFloat32:
        // NaN payloads are not tracked; every NaN maps to the generic NaN.
        if (std::isnan(value.float32)) return Float32Type::NaN();
        return Float32Type::Constant(value.float32);
      case ConstantOp::Kind::kFloat64:
        if (std::isnan(value.float64)) return Float64Type::NaN();
        return Float64Type::Constant(value.float64);
      case ConstantOp::Kind::kWord32:
        return Word32Type::Constant(static_cast<uint32_t>(value.integral));
      case ConstantOp::Kind::kWord64:
        return Word64Type::Constant(static_cast<uint64_t>(value.integral));
      default:
        // TODO(nicohartmann@): Support remaining {kind}s.
        return Type::Invalid();
    }
  }

  // Computes the join of {lhs} and {rhs}; types of different kinds are
  // widened to Any.
  static Type LeastUpperBound(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsAny() || rhs.IsAny()) return Type::Any();
    if (lhs.IsNone()) return rhs;
    if (rhs.IsNone()) return lhs;

    // TODO(nicohartmann@): We might use more precise types here but currently
    // there is not much benefit in that.
    if (lhs.kind() != rhs.kind()) return Type::Any();

    switch (lhs.kind()) {
      case Type::Kind::kInvalid:
        UNREACHABLE();
      case Type::Kind::kNone:
        UNREACHABLE();
      case Type::Kind::kWord32:
        return Word32Type::LeastUpperBound(lhs.AsWord32(), rhs.AsWord32(),
                                           zone);
      case Type::Kind::kWord64:
        return Word64Type::LeastUpperBound(lhs.AsWord64(), rhs.AsWord64(),
                                           zone);
      case Type::Kind::kFloat32:
        return Float32Type::LeastUpperBound(lhs.AsFloat32(), rhs.AsFloat32(),
                                            zone);
      case Type::Kind::kFloat64:
        return Float64Type::LeastUpperBound(lhs.AsFloat64(), rhs.AsFloat64(),
                                            zone);
      case Type::Kind::kAny:
        UNREACHABLE();
    }
  }

  // Word32 operations implicitly truncate wider inputs (see
  // TruncateWord32Input).
  static Type TypeWord32Add(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    auto l = TruncateWord32Input(lhs, true);
    auto r = TruncateWord32Input(rhs, true);
    return WordOperationTyper<32>::Add(l, r, zone);
  }

  static Type TypeWord32Sub(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    auto l = TruncateWord32Input(lhs, true);
    auto r = TruncateWord32Input(rhs, true);
    return WordOperationTyper<32>::Subtract(l, r, zone);
  }

  static Type TypeWord64Add(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    if (!InputIs(lhs, Type::Kind::kWord64) ||
        !InputIs(rhs, Type::Kind::kWord64)) {
      return Word64Type::Any();
    }
    const auto& l = lhs.AsWord64();
    const auto& r = rhs.AsWord64();
    return WordOperationTyper<64>::Add(l, r, zone);
  }

  static Type TypeWord64Sub(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    if (!InputIs(lhs, Type::Kind::kWord64) ||
        !InputIs(rhs, Type::Kind::kWord64)) {
      return Word64Type::Any();
    }
    const auto& l = lhs.AsWord64();
    const auto& r = rhs.AsWord64();
    return WordOperationTyper<64>::Subtract(l, r, zone);
  }

  static Type TypeFloat32Add(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    if (!InputIs(lhs, Type::Kind::kFloat32) ||
        !InputIs(rhs, Type::Kind::kFloat32)) {
      return Float32Type::Any();
    }
    const auto& l = lhs.AsFloat32();
    const auto& r = rhs.AsFloat32();
    return FloatOperationTyper<32>::Add(l, r, zone);
  }

  static Type TypeFloat32Sub(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    if (!InputIs(lhs, Type::Kind::kFloat32) ||
        !InputIs(rhs, Type::Kind::kFloat32)) {
      return Float32Type::Any();
    }
    const auto& l = lhs.AsFloat32();
    const auto& r = rhs.AsFloat32();
    return FloatOperationTyper<32>::Subtract(l, r, zone);
  }

  static Type TypeFloat64Add(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    if (!InputIs(lhs, Type::Kind::kFloat64) ||
        !InputIs(rhs, Type::Kind::kFloat64)) {
      return Float64Type::Any();
    }
    const auto& l = lhs.AsFloat64();
    const auto& r = rhs.AsFloat64();
    return FloatOperationTyper<64>::Add(l, r, zone);
  }

  static Type TypeFloat64Sub(const Type& lhs, const Type& rhs, Zone* zone) {
    if (lhs.IsNone() || rhs.IsNone()) return Type::None();
    if (!InputIs(lhs, Type::Kind::kFloat64) ||
        !InputIs(rhs, Type::Kind::kFloat64)) {
      return Float64Type::Any();
    }
    const auto& l = lhs.AsFloat64();
    const auto& r = rhs.AsFloat64();
    return FloatOperationTyper<64>::Subtract(l, r, zone);
  }

  // Converts {input} into a Word32 type. Word64 inputs are narrowed if
  // {implicit_word64_narrowing} is set; anything else is a typing error
  // (unless invalid inputs are still allowed, see allow_invalid_inputs()).
  static Word32Type TruncateWord32Input(const Type& input,
                                        bool implicit_word64_narrowing) {
    DCHECK(!input.IsInvalid());
    DCHECK(!input.IsNone());

    if (input.IsAny()) {
      if (allow_invalid_inputs()) return Word32Type::Any();
    } else if (input.IsWord32()) {
      return input.AsWord32();
    } else if (input.IsWord64() && implicit_word64_narrowing) {
      // The input is implicitly converted to word32.
      const auto& w64 = input.AsWord64();
      if (auto constant_opt = w64.try_get_constant()) {
        return Word32Type::Constant(static_cast<uint32_t>(*constant_opt));
      }
      // TODO(nicohartmann@): Compute a more precise range here.
      return Word32Type::Any();
    }
    UNREACHABLE();
  }

  // Checks that {input} has the {expected} kind; Invalid/Any inputs are
  // tolerated (returning false) only while allow_invalid_inputs() holds.
  static bool InputIs(const Type& input, Type::Kind expected) {
    if (input.IsInvalid()) {
      if (allow_invalid_inputs()) return false;
    } else if (input.kind() == expected) {
      return true;
    } else if (input.IsAny()) {
      if (allow_invalid_inputs()) return false;
    }
    UNREACHABLE();
  }

  // For now we allow invalid inputs (which will then just lead to very generic
  // typing). Once all operations are implemented, we are going to disable this.
  static bool allow_invalid_inputs() { return true; }
};
// Extra arguments threaded to TypeInferenceReducer through the assembler's
// argument tuple (see TypeInferenceReducer::ArgT).
struct TypeInferenceReducerArgs {
  Isolate* isolate;
};
// Reducer that infers and records a type for every operation emitted into the
// output graph. Per-block type states are kept in a SnapshotTable: a block's
// state is sealed when the next block is bound, merged at control-flow joins,
// and refined along branch edges (e.g. after a comparison).
template <class Next>
class TypeInferenceReducer : public Next {
  static_assert(next_is_bottom_of_assembler_stack<Next>::value);
  using table_t = SnapshotTable<Type>;

 public:
  using Next::Asm;
  using ArgT =
      base::append_tuple_type<typename Next::ArgT, TypeInferenceReducerArgs>;

  template <typename... Args>
  explicit TypeInferenceReducer(const std::tuple<Args...>& args)
      : Next(args),
        types_(Asm().output_graph().operation_types()),
        table_(Asm().phase_zone()),
        op_to_key_mapping_(Asm().phase_zone()),
        block_to_snapshot_mapping_(Asm().input_graph().block_count(),
                                   base::nullopt, Asm().phase_zone()),
        predecessors_(Asm().phase_zone()),
        isolate_(std::get<TypeInferenceReducerArgs>(args).isolate) {}

  // Called when a new block becomes the current emission target. Seals the
  // previous block's snapshot, merges the predecessors' snapshots into a new
  // one, and refines types along a unique branch predecessor.
  void Bind(Block* new_block, const Block* origin) {
    Next::Bind(new_block, origin);

    // Seal the current block first.
    if (table_.IsSealed()) {
      DCHECK_NULL(current_block_);
    } else {
      // If we bind a new block while the previous one is still unsealed, we
      // finalize it.
      DCHECK_NOT_NULL(current_block_);
      DCHECK(current_block_->index().valid());
      block_to_snapshot_mapping_[current_block_->index()] = table_.Seal();
      current_block_ = nullptr;
    }

    // Collect the snapshots of all predecessors.
    {
      predecessors_.clear();
      for (const Block* pred = new_block->LastPredecessor(); pred != nullptr;
           pred = pred->NeighboringPredecessor()) {
        base::Optional<table_t::Snapshot> pred_snapshot =
            block_to_snapshot_mapping_[pred->index()];
        DCHECK(pred_snapshot.has_value());
        predecessors_.push_back(pred_snapshot.value());
      }
      // Predecessors are linked in reverse order; restore forward order.
      std::reverse(predecessors_.begin(), predecessors_.end());
    }

    // Start a new snapshot for this block by merging information from
    // predecessors.
    {
      auto MergeTypes = [&](table_t::Key,
                            base::Vector<Type> predecessors) -> Type {
        DCHECK_GT(predecessors.size(), 0);
        Type result_type = predecessors[0];
        for (size_t i = 1; i < predecessors.size(); ++i) {
          result_type = Typer::LeastUpperBound(result_type, predecessors[i],
                                               Asm().graph_zone());
        }
        return result_type;
      };

      table_.StartNewSnapshot(base::VectorOf(predecessors_), MergeTypes);
    }

    // Check if the predecessor is a branch that allows us to refine a few
    // types.
    if (new_block->HasExactlyNPredecessors(1)) {
      Block* predecessor = new_block->LastPredecessor();
      const Operation& terminator =
          predecessor->LastOperation(Asm().output_graph());
      if (const BranchOp* branch = terminator.TryCast<BranchOp>()) {
        DCHECK(branch->if_true == new_block || branch->if_false == new_block);
        RefineTypesAfterBranch(branch, branch->if_true == new_block);
      }
    }
    current_block_ = new_block;
  }

  // Narrows the types of a comparison's operands for the branch target we
  // are about to enter ({then_branch} selects the true/false edge).
  void RefineTypesAfterBranch(const BranchOp* branch, bool then_branch) {
    Zone* zone = Asm().graph_zone();
    // Inspect branch condition.
    const Operation& condition = Asm().output_graph().Get(branch->condition());
    if (const ComparisonOp* comparison = condition.TryCast<ComparisonOp>()) {
      Type lhs = GetType(comparison->left());
      Type rhs = GetType(comparison->right());
      // If we don't have proper types, there is nothing we can do.
      if (lhs.IsInvalid() || rhs.IsInvalid()) return;

      // TODO(nicohartmann@): Might get rid of this once everything is properly
      // typed.
      if (lhs.IsAny() || rhs.IsAny()) return;
      DCHECK(!lhs.IsNone());
      DCHECK(!rhs.IsNone());

      const bool is_signed = ComparisonOp::IsSigned(comparison->kind);
      const bool is_less_than = ComparisonOp::IsLessThan(comparison->kind);
      Type l_refined;
      Type r_refined;

      switch (comparison->rep.value()) {
        case RegisterRepresentation::Word32(): {
          if (is_signed) {
            // TODO(nicohartmann@): Support signed comparison.
            return;
          }
          lhs = Typer::TruncateWord32Input(lhs, true);
          rhs = Typer::TruncateWord32Input(rhs, true);
          Word32Type l = lhs.AsWord32();
          Word32Type r = rhs.AsWord32();
          uint32_t l_min, l_max, r_min, r_max;
          if (then_branch) {
            // Condition holds: l <(=) r bounds l from above and r from below.
            l_min = 0;
            l_max = r.unsigned_max();
            r_min = l.unsigned_min();
            r_max = std::numeric_limits<uint32_t>::max();
            if (is_less_than) {
              // Strict comparison: bounds are exclusive.
              l_max = next_smaller(l_max);
              r_min = next_larger(r_min);
            }
          } else {
            // Condition fails: the inverse relation holds.
            l_min = r.unsigned_min();
            l_max = std::numeric_limits<uint32_t>::max();
            r_min = 0;
            r_max = l.unsigned_max();
            if (!is_less_than) {
              l_min = next_larger(l_min);
              r_max = next_smaller(r_max);
            }
          }
          auto l_restrict = Word32Type::Range(l_min, l_max, zone);
          auto r_restrict = Word32Type::Range(r_min, r_max, zone);
          l_refined = Word32Type::Intersect(
              l, l_restrict, Type::ResolutionMode::kOverApproximate, zone);
          r_refined = Word32Type::Intersect(
              r, r_restrict, Type::ResolutionMode::kOverApproximate, zone);
          break;
        }
        case RegisterRepresentation::Float64(): {
          constexpr double infty = std::numeric_limits<double>::infinity();
          Float64Type l = lhs.AsFloat64();
          Float64Type r = rhs.AsFloat64();
          double l_min, l_max, r_min, r_max;
          uint32_t special_values = Float64Type::kNoSpecialValues;
          if (then_branch) {
            l_min = -infty;
            l_max = r.max();
            r_min = l.min();
            r_max = infty;
            if (is_less_than) {
              l_max = next_smaller(l_max);
              r_min = next_larger(r_min);
            }
          } else {
            l_min = r.min();
            l_max = infty;
            r_min = -infty;
            r_max = l.max();
            // A failed comparison may also be due to NaN operands.
            special_values = Float64Type::kNaN;
            if (!is_less_than) {
              l_min = next_larger(l_min);
              r_max = next_smaller(r_max);
            }
          }
          auto l_restrict =
              Float64Type::Range(l_min, l_max, special_values, zone);
          auto r_restrict =
              Float64Type::Range(r_min, r_max, special_values, zone);
          l_refined = Float64Type::Intersect(l, l_restrict, zone);
          r_refined = Float64Type::Intersect(r, r_restrict, zone);
          break;
        }
        default:
          return;
      }

      const std::string branch_str = branch->ToString().substr(0, 40);
      USE(branch_str);
      TRACE_TYPING("\033[32mBr   %3d:%-40s\033[0m\n",
                   Asm().output_graph().Index(*branch).id(),
                   branch_str.c_str());
      RefineOperationType(comparison->left(), l_refined,
                          then_branch ? 'T' : 'F');
      RefineOperationType(comparison->right(), r_refined,
                          then_branch ? 'T' : 'F');
    }
  }

  // Records the refined {type} for {op}; {case_for_tracing} is only used for
  // the tracing output ('T'/'F' branch).
  void RefineOperationType(OpIndex op, const Type& type,
                           char case_for_tracing) {
    DCHECK(op.valid());
    DCHECK(!type.IsInvalid());

    TRACE_TYPING("\033[32m  %c: %3d:%-40s ~~> %s\033[0m\n", case_for_tracing,
                 op.id(),
                 Asm().output_graph().Get(op).ToString().substr(0, 40).c_str(),
                 type.ToString().c_str());

    SetType(op, type);

    // TODO(nicohartmann@): One could push the refined type deeper into the
    // operations.
  }

  // Most general type for a value of representation {rep}.
  Type TypeForRepresentation(RegisterRepresentation rep) {
    switch (rep.value()) {
      case RegisterRepresentation::Word32():
        return Word32Type::Any();
      case RegisterRepresentation::Word64():
        return Word64Type::Any();
      case RegisterRepresentation::Float32():
        return Float32Type::Any();
      case RegisterRepresentation::Float64():
        return Float64Type::Any();

      case RegisterRepresentation::Tagged():
      case RegisterRepresentation::Compressed():
        // TODO(nicohartmann@): Support these representations.
        return Type::Any();
    }
  }

  // A phi's type is the join of its inputs' types; untyped inputs default to
  // the representation's most general type.
  OpIndex ReducePhi(base::Vector<const OpIndex> inputs,
                    RegisterRepresentation rep) {
    OpIndex index = Next::ReducePhi(inputs, rep);

    Type result_type = Type::None();
    for (const OpIndex input : inputs) {
      Type type = types_[input];
      if (type.IsInvalid()) {
        type = TypeForRepresentation(rep);
      }
      // TODO(nicohartmann@): Should all temporary types be in the
      // graph_zone()?
      result_type =
          Typer::LeastUpperBound(result_type, type, Asm().graph_zone());
    }

    SetType(index, result_type);
    return index;
  }

  OpIndex ReduceConstant(ConstantOp::Kind kind, ConstantOp::Storage value) {
    OpIndex index = Next::ReduceConstant(kind, value);
    if (!index.valid()) return index;

    Type type = Typer::TypeConstant(kind, value);
    SetType(index, type);
    return index;
  }

  OpIndex ReduceWordBinop(OpIndex left, OpIndex right, WordBinopOp::Kind kind,
                          WordRepresentation rep) {
    OpIndex index = Next::ReduceWordBinop(left, right, kind, rep);
    if (!index.valid()) return index;

    Type left_type = GetType(left);
    Type right_type = GetType(right);
    if (left_type.IsInvalid() || right_type.IsInvalid()) return index;

    Zone* zone = Asm().graph_zone();
    Type result_type = Type::Invalid();
    if (rep == WordRepresentation::Word32()) {
      switch (kind) {
        case WordBinopOp::Kind::kAdd:
          result_type = Typer::TypeWord32Add(left_type, right_type, zone);
          break;
        case WordBinopOp::Kind::kSub:
          result_type = Typer::TypeWord32Sub(left_type, right_type, zone);
          break;
        default:
          // TODO(nicohartmann@): Support remaining {kind}s.
          break;
      }
    } else {
      DCHECK_EQ(rep, WordRepresentation::Word64());
      switch (kind) {
        case WordBinopOp::Kind::kAdd:
          result_type = Typer::TypeWord64Add(left_type, right_type, zone);
          break;
        case WordBinopOp::Kind::kSub:
          result_type = Typer::TypeWord64Sub(left_type, right_type, zone);
          break;
        default:
          // TODO(nicohartmann@): Support remaining {kind}s.
          break;
      }
    }

    SetType(index, result_type);
    return index;
  }

  OpIndex ReduceFloatBinop(OpIndex left, OpIndex right, FloatBinopOp::Kind kind,
                           FloatRepresentation rep) {
    OpIndex index = Next::ReduceFloatBinop(left, right, kind, rep);
    if (!index.valid()) return index;

    Type result_type = Type::Invalid();
    Type left_type = GetType(left);
    Type right_type = GetType(right);

    if (!left_type.IsInvalid() && !right_type.IsInvalid()) {
      if (rep == FloatRepresentation::Float32()) {
        switch (kind) {
          case FloatBinopOp::Kind::kAdd:
            result_type = Typer::TypeFloat32Add(left_type, right_type,
                                                Asm().graph_zone());
            break;
          case FloatBinopOp::Kind::kSub:
            result_type = Typer::TypeFloat32Sub(left_type, right_type,
                                                Asm().graph_zone());
            break;
          default:
            // TODO(nicohartmann@): Support remaining {kind}s.
            break;
        }
      } else {
        DCHECK_EQ(rep, FloatRepresentation::Float64());
        switch (kind) {
          case FloatBinopOp::Kind::kAdd:
            result_type = Typer::TypeFloat64Add(left_type, right_type,
                                                Asm().graph_zone());
            break;
          case FloatBinopOp::Kind::kSub:
            result_type = Typer::TypeFloat64Sub(left_type, right_type,
                                                Asm().graph_zone());
            break;
          default:
            // TODO(nicohartmann@): Support remaining {kind}s.
            break;
        }
      }
    }

    SetType(index, result_type);
    return index;
  }

  // Returns the current (snapshot-table) type of {index}, or Invalid if the
  // operation has not been typed yet.
  Type GetType(const OpIndex index) {
    if (auto key = op_to_key_mapping_[index]) return table_.Get(*key);
    return Type::Invalid();
  }

  // Stores {result_type} for {index}, creating a snapshot-table key on first
  // use. Invalid types are only traced, never stored.
  void SetType(const OpIndex index, const Type& result_type) {
    if (!result_type.IsInvalid()) {
      if (auto key_opt = op_to_key_mapping_[index]) {
        table_.Set(*key_opt, result_type);
        DCHECK(!types_[index].IsInvalid());
      } else {
        auto key = table_.NewKey(Type::None());
        table_.Set(key, result_type);
        types_[index] = result_type;
        op_to_key_mapping_[index] = key;
      }
    }

    TRACE_TYPING(
        "\033[%smType %3d:%-40s ==> %s\033[0m\n",
        (result_type.IsInvalid() ? "31" : "32"), index.id(),
        Asm().output_graph().Get(index).ToString().substr(0, 40).c_str(),
        (result_type.IsInvalid() ? "" : result_type.ToString().c_str()));
  }

 private:
  // Permanent per-operation types stored in the output graph.
  GrowingSidetable<Type>& types_;
  // Scoped per-block type state (merged/sealed along control flow).
  table_t table_;
  const Block* current_block_ = nullptr;
  // Maps operations to their snapshot-table key (if typed).
  GrowingSidetable<base::Optional<table_t::Key>> op_to_key_mapping_;
  // Sealed type snapshot of each finished block.
  GrowingBlockSidetable<base::Optional<table_t::Snapshot>>
      block_to_snapshot_mapping_;
  // {predecessors_} is used during merging, but we use an instance variable for
  // it, in order to save memory and not reallocate it for each merge.
  ZoneVector<table_t::Snapshot> predecessors_;
  Isolate* isolate_;
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_TYPE_INFERENCE_REDUCER_H_

View File

@ -0,0 +1,548 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/turboshaft/types.h"
#include <sstream>
#include "src/base/logging.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/representation-change.h"
#include "src/heap/factory.h"
#include "src/objects/turboshaft-types-inl.h"
namespace v8::internal::compiler::turboshaft {
namespace {

// Splits a 64-bit value into its {high, low} 32-bit halves.
std::pair<uint32_t, uint32_t> uint64_to_high_low(uint64_t value) {
  const uint32_t high = static_cast<uint32_t>(value >> 32);
  const uint32_t low = static_cast<uint32_t>(value);
  return {high, low};
}

}  // namespace
// Structural equality: same kind and, for payload-carrying kinds, equal
// payload. Invalid types must not be compared.
bool Type::Equals(const Type& other) const {
  DCHECK(!IsInvalid());
  DCHECK(!other.IsInvalid());

  if (kind_ != other.kind_) return false;
  switch (kind_) {
    case Kind::kInvalid:
      UNREACHABLE();
    case Kind::kNone:
      return true;
    case Kind::kWord32:
      return AsWord32().Equals(other.AsWord32());
    case Kind::kWord64:
      return AsWord64().Equals(other.AsWord64());
    case Kind::kFloat32:
      return AsFloat32().Equals(other.AsFloat32());
    case Kind::kFloat64:
      return AsFloat64().Equals(other.AsFloat64());
    case Kind::kAny:
      return true;
  }
}
// Writes a human-readable representation of this type to {stream}.
void Type::PrintTo(std::ostream& stream) const {
  if (kind_ == Kind::kNone) {
    stream << "None";
  } else if (kind_ == Kind::kWord32) {
    AsWord32().PrintTo(stream);
  } else if (kind_ == Kind::kWord64) {
    AsWord64().PrintTo(stream);
  } else if (kind_ == Kind::kFloat32) {
    AsFloat32().PrintTo(stream);
  } else if (kind_ == Kind::kFloat64) {
    AsFloat64().PrintTo(stream);
  } else if (kind_ == Kind::kAny) {
    stream << "Any";
  } else {
    // Kind::kInvalid has no printable representation.
    UNREACHABLE();
  }
}
void Type::Print() const {
StdoutStream os;
PrintTo(os);
os << std::endl;
}
// Materializes this type as a heap object (used e.g. by the type-assertion
// machinery). Only payload-carrying kinds are supported so far.
Handle<TurboshaftType> Type::AllocateOnHeap(Factory* factory) const {
  DCHECK_NOT_NULL(factory);
  switch (kind_) {
    case Kind::kInvalid:
      UNREACHABLE();
    case Kind::kNone:
      UNIMPLEMENTED();
    case Kind::kWord32:
      return AsWord32().AllocateOnHeap(factory);
    case Kind::kWord64:
      return AsWord64().AllocateOnHeap(factory);
    case Kind::kFloat32:
      return AsFloat32().AllocateOnHeap(factory);
    case Kind::kFloat64:
      return AsFloat64().AllocateOnHeap(factory);
    case Kind::kAny:
      UNIMPLEMENTED();
  }
}
// Returns true if {value} is an element of this type.
template <size_t Bits>
bool WordType<Bits>::Contains(word_t value) const {
  if (sub_kind() == SubKind::kRange) {
    const word_t from = range_from();
    const word_t to = range_to();
    // A wrapping range (from > to) denotes [from, max] u [0, to].
    if (is_wrapping()) return value >= from || value <= to;
    return from <= value && value <= to;
  }
  // Set: linear scan over the (small) element list.
  for (int i = 0; i < set_size(); ++i) {
    if (set_element(i) == value) return true;
  }
  return false;
}
// Structural equality on word types: same sub kind and identical bounds
// (for ranges) or identical element lists (for sets).
template <size_t Bits>
bool WordType<Bits>::Equals(const WordType<Bits>& other) const {
  if (sub_kind() != other.sub_kind()) return false;
  switch (sub_kind()) {
    case SubKind::kRange:
      // Two "any" ranges compare equal even if their stored bounds differ.
      return (range_from() == other.range_from() &&
              range_to() == other.range_to()) ||
             (is_any() && other.is_any());
    case SubKind::kSet: {
      if (set_size() != other.set_size()) return false;
      // Elements are stored sorted, so positional comparison suffices.
      for (int i = 0; i < set_size(); ++i) {
        if (set_element(i) != other.set_element(i)) return false;
      }
      return true;
    }
  }
}
// Computes the least upper bound of the two ranges [l_from, l_to] and
// [r_from, r_to], either of which may be wrapping (to < from, denoting
// [from, max] u [0, to]). The diagrams show the covered value space.
template <size_t Bits, typename word_t = typename WordType<Bits>::word_t>
WordType<Bits> LeastUpperBoundFromRanges(word_t l_from, word_t l_to,
                                         word_t r_from, word_t r_to,
                                         Zone* zone) {
  const bool lhs_wrapping = l_to < l_from;
  const bool rhs_wrapping = r_to < r_from;
  // Case 1: Both ranges non-wrapping
  // lhs ---|XXX|--  --|XXX|---  -|XXXXXX|-  ---|XX|---  -|XX|------
  // rhs -|XXX|----  ----|XXX|-  ---|XX|---  -|XXXXXX|-  ------|XX|-
  // ==> -|XXXXX|--  --|XXXXX|-  -|XXXXXX|-  -|XXXXXX|-  -|XXXXXXX|-
  if (!lhs_wrapping && !rhs_wrapping) {
    return WordType<Bits>::Range(std::min(l_from, r_from), std::max(l_to, r_to),
                                 zone);
  }
  // Case 2: Both ranges wrapping
  // lhs XXX|----|XXX  X|---|XXXXXX  XXXXXX|---|X  XX|--|XXXXXX
  // rhs X|---|XXXXXX  XXX|----|XXX  XX|--|XXXXXX  XXXXXX|--|XX
  // ==> XXX|-|XXXXXX  XXX|-|XXXXXX  XXXXXXXXXXXX  XXXXXXXXXXXX
  if (lhs_wrapping && rhs_wrapping) {
    const auto from = std::min(l_from, r_from);
    const auto to = std::max(l_to, r_to);
    // The two tails now overlap on both ends: the result is the full space.
    if (to >= from) return WordType<Bits>::Any();
    auto result = WordType<Bits>::Range(from, to, zone);
    DCHECK(result.is_wrapping());
    return result;
  }
  // Normalize so that lhs is the wrapping range.
  if (rhs_wrapping)
    return LeastUpperBoundFromRanges<Bits>(r_from, r_to, l_from, l_to, zone);
  DCHECK(lhs_wrapping);
  DCHECK(!rhs_wrapping);
  // Case 3 & 4: lhs is wrapping, rhs is not
  // lhs XXX|----|XXX  XXX|----|XXX  XXXXX|--|XXX  X|-------|XX
  // rhs -------|XX|-  -|XX|-------  ----|XXXXX|-  ---|XX|-----
  // ==> XXX|---|XXXX  XXXX|---|XXX  XXXXXXXXXXXX  XXXXXX|--|XX
  if (r_from <= l_to) {
    if (r_to <= l_to)
      return WordType<Bits>::Range(l_from, l_to, zone);  // y covered by x
    if (r_to >= l_from) return WordType<Bits>::Any();    // ex3
    auto result = WordType<Bits>::Range(l_from, r_to, zone);  // ex 1
    DCHECK(result.is_wrapping());
    DCHECK(!result.is_any());
    return result;
  } else if (r_to >= l_from) {
    if (r_from >= l_from)
      return WordType<Bits>::Range(l_from, l_to, zone);  // y covered by x
    DCHECK_GT(r_from, l_to);  // handled above
    auto result = WordType<Bits>::Range(r_from, l_to, zone);  // ex 2
    DCHECK(result.is_wrapping());
    DCHECK(!result.is_any());
    return result;
  } else {
    // rhs lies entirely in lhs' gap: extend whichever tail is closer.
    const auto df = r_from - l_to;
    const auto dt = l_from - r_to;
    WordType<Bits> result =
        df > dt ? WordType<Bits>::Range(r_from, l_to, zone)  // ex 4
                : WordType<Bits>::Range(l_from, r_to, zone);
    DCHECK(result.is_wrapping());
    DCHECK(!result.is_any());
    return result;
  }
}
// Computes the least upper bound (join) of {lhs} and {rhs}: the smallest
// representable word type containing every value of both inputs.
template <size_t Bits>
// static
WordType<Bits> WordType<Bits>::LeastUpperBound(const WordType<Bits>& lhs,
                                               const WordType<Bits>& rhs,
                                               Zone* zone) {
  if (lhs.is_set()) {
    if (!rhs.is_set()) {
      if (lhs.set_size() == 1) {
        word_t e = lhs.set_element(0);
        if (rhs.is_wrapping()) {
          // If {rhs} already contains e, {rhs} is the upper bound.
          if (e <= rhs.range_to() || rhs.range_from() <= e) return rhs;
          // Bug fix: the previous `if (lhs.Contains(e)) return lhs;` was
          // unconditionally true ({lhs} is the singleton set {e}) and thus
          // returned a type that does not contain {rhs}. Instead, extend
          // {rhs} towards e on the closer side.
          return (e - rhs.range_to() < rhs.range_from() - e)
                     ? Range(rhs.range_from(), e, zone)
                     : Range(e, rhs.range_to(), zone);
        }
        return Range(std::min(e, rhs.range_from()), std::max(e, rhs.range_to()),
                     zone);
      }
      // TODO(nicohartmann@): A wrapping range may be a better fit in some
      // cases.
      return LeastUpperBoundFromRanges<Bits>(
          lhs.unsigned_min(), lhs.unsigned_max(), rhs.range_from(),
          rhs.range_to(), zone);
    }
    // Both sides are sets. We try to construct the combined set.
    base::SmallVector<word_t, kMaxSetSize * 2> result_elements;
    base::vector_append(result_elements, lhs.set_elements());
    base::vector_append(result_elements, rhs.set_elements());
    DCHECK(!result_elements.empty());
    base::sort(result_elements);
    auto it = std::unique(result_elements.begin(), result_elements.end());
    result_elements.pop_back(std::distance(it, result_elements.end()));
    if (result_elements.size() <= kMaxSetSize) {
      return Set(result_elements, zone);
    }
    // We have to construct a range instead.
    // TODO(nicohartmann@): A wrapping range may be a better fit in some cases.
    return Range(result_elements.front(), result_elements.back(), zone);
  } else if (rhs.is_set()) {
    return LeastUpperBound(rhs, lhs, zone);
  }

  // Both sides are ranges.
  return LeastUpperBoundFromRanges<Bits>(
      lhs.range_from(), lhs.range_to(), rhs.range_from(), rhs.range_to(), zone);
}
// Computes the intersection of {lhs} and {rhs}. The intersection of a
// wrapping and a non-wrapping range can be two disconnected subranges, which
// the representation cannot express; {resolution_mode} selects what to return
// in that case (Invalid, an over-approximation, or the larger subrange).
template <size_t Bits>
Type WordType<Bits>::Intersect(const WordType<Bits>& lhs,
                               const WordType<Bits>& rhs,
                               ResolutionMode resolution_mode, Zone* zone) {
  // Any is the neutral element of intersection.
  if (lhs.is_any()) return rhs;
  if (rhs.is_any()) return lhs;
  if (lhs.is_set() || rhs.is_set()) {
    // At least one side is a set ({x}); the result is the subset of {x}'s
    // elements that the other side ({y}) contains.
    const auto& x = lhs.is_set() ? lhs : rhs;
    const auto& y = lhs.is_set() ? rhs : lhs;
    base::SmallVector<word_t, kMaxSetSize * 2> result_elements;
    for (int i = 0; i < x.set_size(); ++i) {
      const word_t element = x.set_element(i);
      if (y.Contains(element)) result_elements.push_back(element);
    }
    if (result_elements.empty()) return Type::None();
    // Filtering a sorted set preserves order and uniqueness.
    DCHECK(detail::is_unique_and_sorted(result_elements));
    return Set(result_elements, zone);
  }
  DCHECK(lhs.is_range() && rhs.is_range());
  const bool lhs_wrapping = lhs.is_wrapping();
  if (!lhs_wrapping && !rhs.is_wrapping()) {
    // Neither range wraps: the result is the interval overlap (or None if the
    // intervals are disjoint).
    const auto result_from = std::max(lhs.range_from(), rhs.range_from());
    const auto result_to = std::min(lhs.range_to(), rhs.range_to());
    return result_to < result_from
               ? Type::None()
               : WordType::Range(result_from, result_to, zone);
  }
  if (lhs_wrapping && rhs.is_wrapping()) {
    // Both ranges wrap, so both contain the wrap-around point and the result
    // is a wrapping range again.
    const auto result_from = std::max(lhs.range_from(), rhs.range_from());
    const auto result_to = std::min(lhs.range_to(), rhs.range_to());
    auto result = WordType::Range(result_from, result_to, zone);
    DCHECK(result.is_wrapping());
    return result;
  }
  // Exactly one side ({x}) wraps. Intersect the non-wrapping side {y} with
  // the two non-wrapping pieces of {x}: [0, x.to] and [x.from, max].
  const auto& x = lhs_wrapping ? lhs : rhs;
  const auto& y = lhs_wrapping ? rhs : lhs;
  DCHECK(x.is_wrapping());
  DCHECK(!y.is_wrapping());
  auto subrange_low = Intersect(y, Range(0, x.range_to(), zone),
                                ResolutionMode::kPreciseOrInvalid, zone);
  DCHECK(!subrange_low.IsInvalid());
  auto subrange_high = Intersect(
      y, Range(x.range_from(), std::numeric_limits<word_t>::max(), zone),
      ResolutionMode::kPreciseOrInvalid, zone);
  DCHECK(!subrange_high.IsInvalid());
  if (subrange_low.IsNone()) return subrange_high;
  if (subrange_high.IsNone()) return subrange_low;
  // Both pieces are non-empty, so the precise result would be two
  // disconnected subranges; resolve according to {resolution_mode}.
  auto s_l = subrange_low.template AsWord<Bits>();
  auto s_h = subrange_high.template AsWord<Bits>();
  switch (resolution_mode) {
    case ResolutionMode::kPreciseOrInvalid:
      return Type::Invalid();
    case ResolutionMode::kOverApproximate:
      return LeastUpperBound(s_l, s_h, zone);
    case ResolutionMode::kGreatestLowerBound:
      // Keep the larger of the two subranges.
      return (s_l.unsigned_max() - s_l.unsigned_min() <
              s_h.unsigned_max() - s_h.unsigned_min())
                 ? s_h
                 : s_l;
  }
}
// Prints a human-readable representation: "Word32[from, to]" for ranges or
// "Word64{e0, e1, ...}" for sets.
template <size_t Bits>
void WordType<Bits>::PrintTo(std::ostream& stream) const {
  stream << (Bits == 32 ? "Word32" : "Word64");
  if (is_range()) {
    stream << "[" << range_from() << ", " << range_to() << "]";
    return;
  }
  DCHECK(is_set());
  stream << "{";
  for (int index = 0; index < set_size(); ++index) {
    if (index != 0) stream << ", ";
    stream << set_element(index);
  }
  stream << "}";
}
// Creates a heap-object representation of this type (used by the
// --turboshaft-assert-types runtime type assertions; see the flag definition
// in this change).
template <size_t Bits>
Handle<TurboshaftType> WordType<Bits>::AllocateOnHeap(Factory* factory) const {
  if constexpr (Bits == 32) {
    if (is_range()) {
      return factory->NewTurboshaftWord32RangeType(range_from(), range_to(),
                                                   AllocationType::kYoung);
    } else {
      DCHECK(is_set());
      auto result = factory->NewTurboshaftWord32SetType(set_size(),
                                                        AllocationType::kYoung);
      for (int i = 0; i < set_size(); ++i) {
        result->set_elements(i, set_element(i));
      }
      return result;
    }
  } else {
    if (is_range()) {
      // 64-bit values are stored as two 32-bit halves on the heap.
      // NOTE(review): {uint64_to_high_low} is defined elsewhere in this file;
      // presumably it returns (high, low) in that order -- verify at the
      // definition.
      const auto [from_high, from_low] = uint64_to_high_low(range_from());
      const auto [to_high, to_low] = uint64_to_high_low(range_to());
      return factory->NewTurboshaftWord64RangeType(
          from_high, from_low, to_high, to_low, AllocationType::kYoung);
    } else {
      DCHECK(is_set());
      auto result = factory->NewTurboshaftWord64SetType(set_size(),
                                                        AllocationType::kYoung);
      for (int i = 0; i < set_size(); ++i) {
        const auto [high, low] = uint64_to_high_low(set_element(i));
        result->set_elements_high(i, high);
        result->set_elements_low(i, low);
      }
      return result;
    }
  }
}
// Returns true iff {value} is an element of this type. NaN membership is
// tracked by the special-value bit, independently of the sub kind.
template <size_t Bits>
bool FloatType<Bits>::Contains(float_t value) const {
  if (std::isnan(value)) return has_nan();
  if (is_only_nan()) return false;
  if (is_range()) return range_min() <= value && value <= range_max();
  DCHECK(is_set());
  for (int index = 0; index < set_size(); ++index) {
    if (set_element(index) == value) return true;
  }
  return false;
}
// Structural equality: same sub kind, same special-value bits and identical
// payload (range bounds or set elements).
template <size_t Bits>
bool FloatType<Bits>::Equals(const FloatType<Bits>& other) const {
  if (sub_kind() != other.sub_kind()) return false;
  if (special_values() != other.special_values()) return false;
  if (is_only_nan()) return true;
  if (is_range()) return range() == other.range();
  DCHECK(is_set());
  if (set_size() != other.set_size()) return false;
  for (int index = 0; index < set_size(); ++index) {
    if (set_element(index) != other.set_element(index)) return false;
  }
  return true;
}
// Computes a type containing both {lhs} and {rhs} (a least upper bound up to
// the precision the representation allows).
template <size_t Bits>
// static
FloatType<Bits> FloatType<Bits>::LeastUpperBound(const FloatType<Bits>& lhs,
                                                 const FloatType<Bits>& rhs,
                                                 Zone* zone) {
  // NaN is in the result iff it is in either input.
  uint32_t special_values =
      (lhs.has_nan() || rhs.has_nan()) ? Special::kNaN : 0;
  if (lhs.is_any() || rhs.is_any()) {
    return Any(special_values);
  }
  // "Finite" here means finitely many non-NaN elements: a set or NaN-only.
  const bool lhs_finite = lhs.is_set() || lhs.is_only_nan();
  const bool rhs_finite = rhs.is_set() || rhs.is_only_nan();
  if (lhs_finite && rhs_finite) {
    // Merge the two element sets (sorted, deduplicated).
    base::SmallVector<float_t, kMaxSetSize * 2> result_elements;
    if (lhs.is_set()) base::vector_append(result_elements, lhs.set_elements());
    if (rhs.is_set()) base::vector_append(result_elements, rhs.set_elements());
    if (result_elements.empty()) {
      // Both inputs were NaN-only.
      DCHECK_EQ(special_values, Special::kNaN);
      return NaN();
    }
    base::sort(result_elements);
    auto it = std::unique(result_elements.begin(), result_elements.end());
    result_elements.pop_back(std::distance(it, result_elements.end()));
    if (result_elements.size() <= kMaxSetSize) {
      return Set(result_elements, special_values, zone);
    }
    // Too many elements for a set; fall back to the enclosing range.
    return Range(result_elements.front(), result_elements.back(),
                 special_values, zone);
  }
  // We need to construct a range.
  // NOTE(review): if exactly one side is NaN-only, its min()/max() is NaN
  // here, so std::min/std::max propagate NaN into Range(), whose DCHECKs
  // would fail. Presumably callers never combine NaN-only with a range --
  // verify.
  float_t result_min = std::min(lhs.min(), rhs.min());
  float_t result_max = std::max(lhs.max(), rhs.max());
  return Range(result_min, result_max, special_values, zone);
}
// Computes the intersection of {lhs} and {rhs}. Returns Type::None() when the
// intersection is empty; NaN survives only if both inputs contain it.
template <size_t Bits>
// static
Type FloatType<Bits>::Intersect(const FloatType<Bits>& lhs,
                                const FloatType<Bits>& rhs, Zone* zone) {
  // Returns a copy of {t} with its special-value bits replaced.
  auto UpdateSpecials = [](const FloatType& t, uint32_t special_values) {
    if (t.special_values() == special_values) return t;
    auto result = t;
    result.bitfield_ = special_values;
    DCHECK_EQ(result.bitfield_, result.special_values());
    return result;
  };
  const bool has_nan = lhs.has_nan() && rhs.has_nan();
  if (lhs.is_any()) return UpdateSpecials(rhs, has_nan ? kNaN : 0);
  if (rhs.is_any()) return UpdateSpecials(lhs, has_nan ? kNaN : 0);
  if (lhs.is_only_nan() || rhs.is_only_nan()) {
    return has_nan ? NaN() : Type::None();
  }
  if (lhs.is_set() || rhs.is_set()) {
    // At least one side is a set ({x}); keep the elements of {x} that the
    // other side ({y}) contains.
    const auto& x = lhs.is_set() ? lhs : rhs;
    const auto& y = lhs.is_set() ? rhs : lhs;
    base::SmallVector<float_t, kMaxSetSize * 2> result_elements;
    for (int i = 0; i < x.set_size(); ++i) {
      const float_t element = x.set_element(i);
      if (y.Contains(element)) result_elements.push_back(element);
    }
    if (result_elements.empty()) {
      return has_nan ? NaN() : Type::None();
    }
    DCHECK(detail::is_unique_and_sorted(result_elements));
    return Set(result_elements, has_nan ? kNaN : 0, zone);
  }
  DCHECK(lhs.is_range() && rhs.is_range());
  // The intersection of two intervals is bounded by the larger of the two
  // minima and the smaller of the two maxima (not min/min-max/max, which
  // would compute the union's hull).
  const float_t result_min = std::max(lhs.min(), rhs.min());
  const float_t result_max = std::min(lhs.max(), rhs.max());
  if (result_min < result_max) {
    return Range(result_min, result_max, has_nan ? kNaN : kNoSpecialValues,
                 zone);
  } else if (result_min == result_max) {
    // The intervals touch in exactly one point.
    return Set({result_min}, has_nan ? kNaN : 0, zone);
  }
  // Disjoint intervals.
  return has_nan ? NaN() : Type::None();
}
// Prints a human-readable representation, e.g. "Float64[1, 2]+NaN",
// "Float32NaN" or "Float64{0.5, 1.5}".
template <size_t Bits>
void FloatType<Bits>::PrintTo(std::ostream& stream) const {
  stream << (Bits == 32 ? "Float32" : "Float64");
  if (is_only_nan()) {
    stream << "NaN";
    return;
  }
  if (is_range()) {
    stream << "[" << range_min() << ", " << range_max()
           << (has_nan() ? "]+NaN" : "]");
    return;
  }
  DCHECK(is_set());
  stream << "{";
  for (int index = 0; index < set_size(); ++index) {
    if (index != 0) stream << ", ";
    stream << set_element(index);
  }
  stream << (has_nan() ? "}+NaN" : "}");
}
// Creates a heap-object representation of this type. Note that both Float32
// and Float64 types use the TurboshaftFloat64* heap variants (there is no
// Bits==32 branch here).
template <size_t Bits>
Handle<TurboshaftType> FloatType<Bits>::AllocateOnHeap(Factory* factory) const {
  float_t min = 0.0f, max = 0.0f;
  constexpr uint32_t padding = 0;
  if (is_only_nan()) {
    // NaN-only is encoded as the empty range [+inf, -inf] with the NaN flag
    // (first argument) set to 1.
    min = std::numeric_limits<float_t>::infinity();
    max = -std::numeric_limits<float_t>::infinity();
    return factory->NewTurboshaftFloat64RangeType(1, padding, min, max,
                                                  AllocationType::kYoung);
  } else if (is_range()) {
    std::tie(min, max) = minmax();
    return factory->NewTurboshaftFloat64RangeType(
        has_nan() ? 1 : 0, padding, min, max, AllocationType::kYoung);
  } else {
    DCHECK(is_set());
    auto result = factory->NewTurboshaftFloat64SetType(
        has_nan() ? 1 : 0, set_size(), AllocationType::kYoung);
    for (int i = 0; i < set_size(); ++i) {
      result->set_elements(i, set_element(i));
    }
    return result;
  }
}
// Explicit template instantiations for the four supported type widths.
template class WordType<32>;
template class WordType<64>;
template class FloatType<32>;
template class FloatType<64>;
} // namespace v8::internal::compiler::turboshaft

View File

@ -0,0 +1,644 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_TYPES_H_
#define V8_COMPILER_TURBOSHAFT_TYPES_H_
#include <cmath>
#include <limits>
#include "src/base/container-utils.h"
#include "src/base/logging.h"
#include "src/base/small-vector.h"
#include "src/common/globals.h"
#include "src/objects/turboshaft-types.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8::internal {
class Factory;
}
namespace v8::internal::compiler::turboshaft {
namespace detail {
// Returns true iff {container} is sorted in strictly ascending order, i.e.
// sorted and free of duplicates. Containers with 0 or 1 elements trivially
// qualify.
template <typename T>
inline bool is_unique_and_sorted(const T& container) {
  if (std::size(container) <= 1) return true;
  auto prev = std::begin(container);
  auto cur = std::begin(container);
  for (++cur; cur != std::end(container); ++prev, ++cur) {
    if (!(*prev < *cur)) return false;
  }
  return true;
}
// Maps a bit width (32 or 64) to the corresponding unsigned integer and
// floating point representation types.
template <size_t Bits>
struct TypeForBits;
template <>
struct TypeForBits<32> {
  using uint_type = uint32_t;
  using float_type = float;
  static constexpr float_type nan =
      std::numeric_limits<float_type>::quiet_NaN();
};
template <>
struct TypeForBits<64> {
  using uint_type = uint64_t;
  using float_type = double;
  static constexpr float_type nan =
      std::numeric_limits<float_type>::quiet_NaN();
};
// The payload structs below describe the type-specific data stored inline in
// Type::payload_. Each must fit into the two uint64_t words of that buffer
// (enforced by a static_assert in the Type constructor).
struct Payload_Empty {};
// A closed interval [min, max].
template <typename T>
struct Payload_Range {
  T min;
  T max;
};
// Up to two set elements stored inline (the element count lives in
// Type::set_size_).
template <typename T>
struct Payload_InlineSet {
  T elements[2];
};
// A larger set stored out of line in a zone-allocated array.
template <typename T>
struct Payload_OutlineSet {
  T* array;
};
} // namespace detail
// next_smaller / next_larger return the adjacent representable value below /
// above the argument, for floating point and integral types respectively.
// The argument must not already be at the respective limit (and must not be
// NaN for the floating point overloads).
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, T> next_smaller(T value) {
  DCHECK(!std::isnan(value));
  DCHECK_LT(-std::numeric_limits<T>::infinity(), value);
  return std::nextafter(value, -std::numeric_limits<T>::infinity());
}
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, T> next_larger(T value) {
  DCHECK(!std::isnan(value));
  DCHECK_LT(value, std::numeric_limits<T>::infinity());
  return std::nextafter(value, std::numeric_limits<T>::infinity());
}
template <typename T>
std::enable_if_t<std::is_integral<T>::value, T> next_smaller(T value) {
  DCHECK_LT(std::numeric_limits<T>::min(), value);
  return value - 1;
}
template <typename T>
std::enable_if_t<std::is_integral<T>::value, T> next_larger(T value) {
  DCHECK_LT(value, std::numeric_limits<T>::max());
  return value + 1;
}
// Convenience aliases for the width-dependent representation types.
template <size_t Bits>
using uint_type = typename detail::TypeForBits<Bits>::uint_type;
template <size_t Bits>
using float_type = typename detail::TypeForBits<Bits>::float_type;
// Canonical quiet NaN of the given width.
template <size_t Bits>
constexpr float_type<Bits> nan_v = detail::TypeForBits<Bits>::nan;
template <size_t Bits>
class WordType;
template <size_t Bits>
class FloatType;
using Word32Type = WordType<32>;
using Word64Type = WordType<64>;
using Float32Type = FloatType<32>;
using Float64Type = FloatType<64>;
// Base class of all Turboshaft types: a 24-byte tagged value type. The tag
// ({kind_}) selects the interpretation; the subclasses WordType/FloatType add
// no state and only reinterpret the shared payload, which is why Type can be
// copied by value and downcast via static_cast (see AsWord32() etc. below).
class V8_EXPORT_PRIVATE Type {
public:
  enum class Kind : uint8_t {
    kInvalid,
    kNone,
    kWord32,
    kWord64,
    kFloat32,
    kFloat64,
    kAny,
  };
  // Some operations cannot express the result precisely in a type, e.g. when
  // an intersection with a wrapping range may produce two disconnected
  // subranges, which cannot be represented. {ResolutionMode} allows to specify
  // what the operation should do when the result cannot be represented
  // precisely.
  enum class ResolutionMode {
    // Return Type::Invalid().
    kPreciseOrInvalid,
    // Return a safe over approximation.
    kOverApproximate,
    // Return the greatest lower bound that can be represented.
    kGreatestLowerBound,
  };
  // Default-constructed types are invalid.
  Type() : Type(Kind::kInvalid) {}
  // Type constructors
  static inline Type Invalid() { return Type(); }
  static inline Type None() { return Type(Kind::kNone); }
  static inline Type Any() { return Type(Kind::kAny); }
  // Checks and casts
  inline Kind kind() const { return kind_; }
  inline bool IsInvalid() const { return kind_ == Kind::kInvalid; }
  inline bool IsNone() const { return kind_ == Kind::kNone; }
  inline bool IsWord32() const { return kind_ == Kind::kWord32; }
  inline bool IsWord64() const { return kind_ == Kind::kWord64; }
  inline bool IsFloat32() const { return kind_ == Kind::kFloat32; }
  inline bool IsFloat64() const { return kind_ == Kind::kFloat64; }
  inline bool IsAny() const { return kind_ == Kind::kAny; }
  // Width-generic check, used by templated reducers.
  template <size_t B>
  inline bool IsWord() const {
    if constexpr (B == 32)
      return IsWord32();
    else
      return IsWord64();
  }
  // Casts
  inline const Word32Type& AsWord32() const;
  inline const Word64Type& AsWord64() const;
  inline const Float32Type& AsFloat32() const;
  inline const Float64Type& AsFloat64() const;
  template <size_t B>
  inline const auto& AsWord() const {
    if constexpr (B == 32)
      return AsWord32();
    else
      return AsWord64();
  }
  // Comparison
  bool Equals(const Type& other) const;
  // Printing
  void PrintTo(std::ostream& stream) const;
  void Print() const;
  std::string ToString() const {
    std::stringstream stream;
    PrintTo(stream);
    return stream.str();
  }
  // Other functions
  Handle<TurboshaftType> AllocateOnHeap(Factory* factory) const;
protected:
  // {sub_kind}, {set_size} and {bitfield} are interpreted by the subclass
  // identified by {kind}; the payload is copied bitwise into {payload_} and
  // any remainder is zeroed so that Equals can compare representations.
  template <typename Payload>
  Type(Kind kind, uint8_t sub_kind, uint8_t set_size, uint32_t bitfield,
       uint8_t reserved, const Payload& payload)
      : kind_(kind),
        sub_kind_(sub_kind),
        set_size_(set_size),
        reserved_(reserved),
        bitfield_(bitfield) {
    static_assert(sizeof(Payload) <= sizeof(payload_));
    memcpy(&payload_[0], &payload, sizeof(Payload));
    if constexpr (sizeof(Payload) < sizeof(payload_)) {
      memset(reinterpret_cast<uint8_t*>(&payload_[0]) + sizeof(Payload), 0x00,
             sizeof(payload_) - sizeof(Payload));
    }
  }
  // Reinterprets the raw payload words as the subclass-specific payload
  // struct.
  template <typename Payload>
  const Payload& payload() const {
    static_assert(sizeof(Payload) <= sizeof(payload_));
    return *reinterpret_cast<const Payload*>(&payload_[0]);
  }
  Kind kind_;
  uint8_t sub_kind_;
  uint8_t set_size_;
  uint8_t reserved_;
  uint32_t bitfield_;
private:
  // Access through payload<>().
  uint64_t payload_[2];  // Type specific data
  explicit Type(Kind kind) : Type(kind, 0, 0, 0, 0, detail::Payload_Empty{}) {
    DCHECK(kind == Kind::kInvalid || kind == Kind::kNone || kind == Kind::kAny);
  }
};
static_assert(sizeof(Type) == 24);
// Type of unsigned machine words of the given bit width (32 or 64). A
// WordType is either a (possibly wrapping) range [from, to] or a sorted set
// of up to {kMaxSetSize} individual values. A range with from > to "wraps
// around" the maximum value, i.e. it denotes [from, max] u [0, to].
template <size_t Bits>
class WordType : public Type {
  static_assert(Bits == 32 || Bits == 64);
  friend class Type;
  static constexpr int kMaxInlineSetSize = 2;
  enum class SubKind : uint8_t {
    kRange,
    kSet,
  };
public:
  static constexpr int kMaxSetSize = 8;
  using word_t = uint_type<Bits>;
  using value_type = word_t;
  // Constructors
  static WordType Any() {
    return Range(0, std::numeric_limits<word_t>::max(), nullptr);
  }
  static WordType Range(word_t from, word_t to, Zone* zone) {
    // Normalize ranges smaller than {kMaxSetSize} to sets.
    if (to >= from) {
      // (to - from + 1) <= kMaxSetSize
      if (to - from <= kMaxSetSize - 1) {
        // Normalizing non-wrapping ranges to a Set.
        base::SmallVector<word_t, kMaxSetSize> elements;
        for (word_t i = from; i < to; ++i) elements.push_back(i);
        elements.push_back(to);
        return Set(elements, zone);
      }
    } else {
      // (max - from + 1) + (to + 1) <= kMaxSetSize
      if ((std::numeric_limits<word_t>::max() - from + to) <= kMaxSetSize - 2) {
        // Normalizing wrapping ranges to a Set.
        base::SmallVector<word_t, kMaxSetSize> elements;
        for (word_t i = from; i < std::numeric_limits<word_t>::max(); ++i) {
          elements.push_back(i);
        }
        elements.push_back(std::numeric_limits<word_t>::max());
        for (word_t i = 0; i < to; ++i) elements.push_back(i);
        elements.push_back(to);
        base::sort(elements);
        return Set(elements, zone);
      }
    }
    return WordType{SubKind::kRange, 0, Payload_Range{from, to}};
  }
  // Set constructor overloads forwarding to the base::Vector version below.
  template <size_t N>
  static WordType Set(const base::SmallVector<word_t, N>& elements,
                      Zone* zone) {
    return Set(base::Vector<const word_t>{elements.data(), elements.size()},
               zone);
  }
  static WordType Set(const std::vector<word_t>& elements, Zone* zone) {
    return Set(base::Vector<const word_t>{elements.data(), elements.size()},
               zone);
  }
  static WordType Set(const std::initializer_list<word_t>& elements,
                      Zone* zone) {
    return Set(base::Vector<const word_t>{elements.begin(), elements.size()},
               zone);
  }
  // {elements} must be sorted, duplicate-free, non-empty and contain at most
  // {kMaxSetSize} values. {zone} may only be null if the elements fit into
  // the inline storage.
  static WordType Set(const base::Vector<const word_t>& elements, Zone* zone) {
    DCHECK(detail::is_unique_and_sorted(elements));
    DCHECK_IMPLIES(elements.size() > kMaxInlineSetSize, zone != nullptr);
    DCHECK_GT(elements.size(), 0);
    DCHECK_LE(elements.size(), kMaxSetSize);
    if (elements.size() <= kMaxInlineSetSize) {
      // Use inline storage.
      Payload_InlineSet p;
      DCHECK_LT(0, elements.size());
      p.elements[0] = elements[0];
      if (elements.size() > 1) p.elements[1] = elements[1];
      return WordType{SubKind::kSet, static_cast<uint8_t>(elements.size()), p};
    } else {
      // Allocate storage in the zone.
      Payload_OutlineSet p;
      p.array = zone->NewArray<word_t>(elements.size());
      DCHECK_NOT_NULL(p.array);
      for (size_t i = 0; i < elements.size(); ++i) p.array[i] = elements[i];
      return WordType{SubKind::kSet, static_cast<uint8_t>(elements.size()), p};
    }
  }
  static WordType Constant(word_t constant) { return Set({constant}, nullptr); }
  // Checks
  bool is_range() const { return sub_kind() == SubKind::kRange; }
  bool is_set() const { return sub_kind() == SubKind::kSet; }
  // A range covers all values iff to + 1 == from (mod 2^Bits); this covers
  // both [0, max] (where to + 1 overflows to 0) and any wrapping range whose
  // ends meet.
  bool is_any() const { return is_range() && range_to() + 1 == range_from(); }
  bool is_constant() const {
    DCHECK_EQ(set_size_ > 0, is_set());
    return set_size_ == 1;
  }
  bool is_wrapping() const { return is_range() && range_from() > range_to(); }
  // Accessors
  word_t range_from() const {
    DCHECK(is_range());
    return payload<Payload_Range>().min;
  }
  word_t range_to() const {
    DCHECK(is_range());
    return payload<Payload_Range>().max;
  }
  std::pair<word_t, word_t> range() const {
    DCHECK(is_range());
    return {range_from(), range_to()};
  }
  int set_size() const {
    DCHECK(is_set());
    return static_cast<int>(set_size_);
  }
  word_t set_element(int index) const {
    DCHECK(is_set());
    DCHECK_GE(index, 0);
    DCHECK_LT(index, set_size());
    return set_elements()[index];
  }
  base::Vector<const word_t> set_elements() const {
    DCHECK(is_set());
    if (set_size() <= kMaxInlineSetSize) {
      return base::Vector<const word_t>(payload<Payload_InlineSet>().elements,
                                        set_size());
    } else {
      return base::Vector<const word_t>(payload<Payload_OutlineSet>().array,
                                        set_size());
    }
  }
  base::Optional<word_t> try_get_constant() const {
    if (!is_constant()) return base::nullopt;
    DCHECK(is_set());
    DCHECK_EQ(set_size(), 1);
    return set_element(0);
  }
  // Smallest contained value in unsigned interpretation. A wrapping range
  // always contains 0.
  word_t unsigned_min() const {
    switch (sub_kind()) {
      case SubKind::kRange:
        return is_wrapping() ? word_t{0} : range_from();
      case SubKind::kSet:
        return set_element(0);
    }
  }
  // Largest contained value in unsigned interpretation. A wrapping range
  // always contains the maximum value; sets are sorted, so the last element
  // is the maximum.
  word_t unsigned_max() const {
    switch (sub_kind()) {
      case SubKind::kRange:
        return is_wrapping() ? std::numeric_limits<word_t>::max() : range_to();
      case SubKind::kSet:
        DCHECK_GE(set_size(), 1);
        return set_element(set_size() - 1);
    }
  }
  // Misc
  bool Contains(word_t value) const;
  bool Equals(const WordType<Bits>& other) const;
  static WordType LeastUpperBound(const WordType& lhs, const WordType& rhs,
                                  Zone* zone);
  static Type Intersect(const WordType& lhs, const WordType& rhs,
                        ResolutionMode resolution_mode, Zone* zone);
  void PrintTo(std::ostream& stream) const;
  Handle<TurboshaftType> AllocateOnHeap(Factory* factory) const;
private:
  static constexpr Kind KIND = Bits == 32 ? Kind::kWord32 : Kind::kWord64;
  using Payload_Range = detail::Payload_Range<word_t>;
  using Payload_InlineSet = detail::Payload_InlineSet<word_t>;
  using Payload_OutlineSet = detail::Payload_OutlineSet<word_t>;
  SubKind sub_kind() const { return static_cast<SubKind>(sub_kind_); }
  template <typename Payload>
  WordType(SubKind sub_kind, uint8_t set_size, const Payload& payload)
      : Type(KIND, static_cast<uint8_t>(sub_kind), set_size, 0, 0, payload) {}
};
// Type of IEEE 754 floating point values of the given bit width (32 or 64).
// A FloatType is NaN-only, a closed range [min, max], or a sorted set of up
// to {kMaxSetSize} values. NaN membership is tracked separately via the
// Special::kNaN bit in {bitfield_}, never as a range bound or set element.
template <size_t Bits>
class FloatType : public Type {
  static_assert(Bits == 32 || Bits == 64);
  friend class Type;
  static constexpr int kMaxInlineSetSize = 2;
  enum class SubKind : uint8_t {
    kRange,
    kSet,
    kOnlyNan,
  };
public:
  static constexpr int kMaxSetSize = 8;
  using float_t = float_type<Bits>;
  using value_type = float_t;
  enum Special : uint32_t {
    kNoSpecialValues = 0x0,
    kNaN = 0x1,
  };
  // Constructors
  static FloatType NaN() {
    return FloatType{SubKind::kOnlyNan, 0, Special::kNaN, Payload_OnlyNan{}};
  }
  static FloatType Any(uint32_t special_values = Special::kNaN) {
    return FloatType::Range(-std::numeric_limits<float_t>::infinity(),
                            std::numeric_limits<float_t>::infinity(),
                            special_values, nullptr);
  }
  static FloatType Range(float_t min, float_t max, Zone* zone) {
    return Range(min, max, Special::kNoSpecialValues, zone);
  }
  static FloatType Range(float_t min, float_t max, uint32_t special_values,
                         Zone* zone) {
    DCHECK(!std::isnan(min));
    DCHECK(!std::isnan(max));
    DCHECK_LE(min, max);
    // Normalize singleton ranges to sets. {special_values} must be forwarded
    // here; otherwise e.g. Range(x, x, kNaN, zone) would silently drop the
    // NaN bit.
    if (min == max) return Set({min}, special_values, zone);
    return FloatType{SubKind::kRange, 0, special_values,
                     Payload_Range{min, max}};
  }
  // Set constructor overloads forwarding to the base::Vector version below.
  template <size_t N>
  static FloatType Set(const base::SmallVector<const float_t, N>& elements,
                       Zone* zone) {
    return Set(elements, Special::kNoSpecialValues, zone);
  }
  template <size_t N>
  static FloatType Set(const base::SmallVector<float_t, N>& elements,
                       uint32_t special_values, Zone* zone) {
    return Set(base::Vector<const float_t>{elements.data(), elements.size()},
               special_values, zone);
  }
  static FloatType Set(const std::initializer_list<float_t>& elements,
                       uint32_t special_values, Zone* zone) {
    return Set(base::Vector<const float_t>{elements.begin(), elements.size()},
               special_values, zone);
  }
  static FloatType Set(const std::vector<float_t>& elements, Zone* zone) {
    return Set(elements, Special::kNoSpecialValues, zone);
  }
  static FloatType Set(const std::vector<float_t>& elements,
                       uint32_t special_values, Zone* zone) {
    return Set(base::Vector<const float_t>{elements.data(), elements.size()},
               special_values, zone);
  }
  // {elements} must be sorted, duplicate-free, NaN-free, non-empty and
  // contain at most {kMaxSetSize} values. {zone} may only be null if the
  // elements fit into the inline storage.
  static FloatType Set(const base::Vector<const float_t>& elements,
                       uint32_t special_values, Zone* zone) {
    DCHECK(detail::is_unique_and_sorted(elements));
    // NaN should be passed via {special_values} rather than {elements}.
    DCHECK(base::none_of(elements, [](float_t f) { return std::isnan(f); }));
    DCHECK_IMPLIES(elements.size() > kMaxInlineSetSize, zone != nullptr);
    DCHECK_GT(elements.size(), 0);
    DCHECK_LE(elements.size(), kMaxSetSize);
    if (elements.size() <= kMaxInlineSetSize) {
      // Use inline storage.
      Payload_InlineSet p;
      DCHECK_LT(0, elements.size());
      p.elements[0] = elements[0];
      if (elements.size() > 1) p.elements[1] = elements[1];
      return FloatType{SubKind::kSet, static_cast<uint8_t>(elements.size()),
                       special_values, p};
    } else {
      // Allocate storage in the zone.
      Payload_OutlineSet p;
      p.array = zone->NewArray<float_t>(elements.size());
      DCHECK_NOT_NULL(p.array);
      for (size_t i = 0; i < elements.size(); ++i) p.array[i] = elements[i];
      return FloatType{SubKind::kSet, static_cast<uint8_t>(elements.size()),
                       special_values, p};
    }
  }
  static FloatType Constant(float_t constant) {
    return Set({constant}, 0, nullptr);
  }
  // Checks
  bool is_only_nan() const {
    DCHECK_IMPLIES(sub_kind() == SubKind::kOnlyNan, has_nan());
    return sub_kind() == SubKind::kOnlyNan;
  }
  bool is_range() const { return sub_kind() == SubKind::kRange; }
  bool is_set() const { return sub_kind() == SubKind::kSet; }
  bool is_any() const {
    return is_range() &&
           range_min() == -std::numeric_limits<float_t>::infinity() &&
           range_max() == std::numeric_limits<float_t>::infinity();
  }
  // A constant is a one-element set without the NaN bit (NaN-only is not
  // considered a constant).
  bool is_constant() const {
    DCHECK_EQ(set_size_ > 0, is_set());
    return set_size_ == 1 && !has_nan();
  }
  uint32_t special_values() const { return bitfield_; }
  bool has_nan() const { return (special_values() & Special::kNaN) != 0; }
  // Accessors
  float_t range_min() const {
    DCHECK(is_range());
    return payload<Payload_Range>().min;
  }
  float_t range_max() const {
    DCHECK(is_range());
    return payload<Payload_Range>().max;
  }
  std::pair<float_t, float_t> range() const {
    DCHECK(is_range());
    return {range_min(), range_max()};
  }
  int set_size() const {
    DCHECK(is_set());
    return static_cast<int>(set_size_);
  }
  float_t set_element(int index) const {
    DCHECK(is_set());
    DCHECK_GE(index, 0);
    DCHECK_LT(index, set_size());
    return set_elements()[index];
  }
  base::Vector<const float_t> set_elements() const {
    DCHECK(is_set());
    if (set_size() <= kMaxInlineSetSize) {
      return base::Vector<const float_t>(payload<Payload_InlineSet>().elements,
                                         set_size());
    } else {
      return base::Vector<const float_t>(payload<Payload_OutlineSet>().array,
                                         set_size());
    }
  }
  // Smallest/largest contained value; NaN for NaN-only types. Sets are
  // sorted, so the first/last element is the minimum/maximum.
  float_t min() const {
    switch (sub_kind()) {
      case SubKind::kOnlyNan:
        return nan_v<Bits>;
      case SubKind::kRange:
        return range_min();
      case SubKind::kSet:
        return set_element(0);
    }
  }
  float_t max() const {
    switch (sub_kind()) {
      case SubKind::kOnlyNan:
        return nan_v<Bits>;
      case SubKind::kRange:
        return range_max();
      case SubKind::kSet:
        return set_element(set_size() - 1);
    }
  }
  std::pair<float_t, float_t> minmax() const { return {min(), max()}; }
  base::Optional<float_t> try_get_constant() const {
    if (!is_constant()) return base::nullopt;
    DCHECK(is_set());
    DCHECK_EQ(set_size(), 1);
    return set_element(0);
  }
  // Misc
  bool Contains(float_t value) const;
  bool Equals(const FloatType& other) const;
  static FloatType LeastUpperBound(const FloatType& lhs, const FloatType& rhs,
                                   Zone* zone);
  static Type Intersect(const FloatType& lhs, const FloatType& rhs, Zone* zone);
  void PrintTo(std::ostream& stream) const;
  Handle<TurboshaftType> AllocateOnHeap(Factory* factory) const;
private:
  static constexpr Kind KIND = Bits == 32 ? Kind::kFloat32 : Kind::kFloat64;
  SubKind sub_kind() const { return static_cast<SubKind>(sub_kind_); }
  using Payload_Range = detail::Payload_Range<float_t>;
  using Payload_InlineSet = detail::Payload_InlineSet<float_t>;
  using Payload_OutlineSet = detail::Payload_OutlineSet<float_t>;
  using Payload_OnlyNan = detail::Payload_Empty;
  template <typename Payload>
  FloatType(SubKind sub_kind, uint8_t set_size, uint32_t special_values,
            const Payload& payload)
      : Type(KIND, static_cast<uint8_t>(sub_kind), set_size, special_values, 0,
             payload) {
    DCHECK_EQ(special_values & ~Special::kNaN, 0);
  }
};
// Definitions of the checked downcasts declared in Type. static_cast is
// sufficient because the subclasses add no data members (sizeof(Type) == 24
// for all of them); the DCHECKs guard against mismatched kinds.
const Word32Type& Type::AsWord32() const {
  DCHECK(IsWord32());
  return *static_cast<const Word32Type*>(this);
}
const Word64Type& Type::AsWord64() const {
  DCHECK(IsWord64());
  return *static_cast<const Word64Type*>(this);
}
const Float32Type& Type::AsFloat32() const {
  DCHECK(IsFloat32());
  return *static_cast<const Float32Type*>(this);
}
const Float64Type& Type::AsFloat64() const {
  DCHECK(IsFloat64());
  return *static_cast<const Float64Type*>(this);
}
// Streams the human-readable representation produced by Type::PrintTo.
inline std::ostream& operator<<(std::ostream& stream, const Type& type) {
  type.PrintTo(stream);
  return stream;
}
// Structural equality, delegating to Type::Equals.
inline bool operator==(const Type& lhs, const Type& rhs) {
  return lhs.Equals(rhs);
}
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_TYPES_H_

View File

@ -37,6 +37,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/objects/turbofan-types-inl.h"
#include "src/objects/turboshaft-types-inl.h"
#include "src/roots/roots.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"

View File

@ -547,6 +547,10 @@ DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
// TODO(tebbi): Support allocating types from background thread.
DEFINE_NEG_IMPLICATION(assert_types, concurrent_recompilation)
DEFINE_BOOL(
turboshaft_assert_types, false,
"generate runtime type assertions to test the turboshaft type system")
DEFINE_NEG_IMPLICATION(turboshaft_assert_types, concurrent_recompilation)
// Enable verification of SimplifiedLowering in debug builds.
DEFINE_BOOL(verify_simplified_lowering, DEBUG_BOOL,

View File

@ -44,6 +44,7 @@
#include "src/objects/template-objects-inl.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/turbofan-types.h"
#include "src/objects/turboshaft-types.h"
#include "src/regexp/regexp.h"
#if V8_ENABLE_WEBASSEMBLY

View File

@ -375,6 +375,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildTurboshaft) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeTurboshaft) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftRecreateSchedule) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftTypeInference) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \

View File

@ -92,6 +92,7 @@
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/turbofan-types-inl.h"
#include "src/objects/turboshaft-types-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"

View File

@ -240,6 +240,16 @@ class ZoneForwardList;
V(TemplateLiteralObject) \
V(ThinString) \
V(TransitionArray) \
V(TurboshaftFloat64RangeType) \
V(TurboshaftFloat64SetType) \
V(TurboshaftFloat64Type) \
V(TurboshaftType) \
V(TurboshaftWord32RangeType) \
V(TurboshaftWord32SetType) \
V(TurboshaftWord32Type) \
V(TurboshaftWord64RangeType) \
V(TurboshaftWord64SetType) \
V(TurboshaftWord64Type) \
V(UncompiledData) \
V(UncompiledDataWithPreparseData) \
V(UncompiledDataWithoutPreparseData) \

View File

@ -36,6 +36,7 @@
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
#include "src/objects/turbofan-types-inl.h"
#include "src/objects/turboshaft-types-inl.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects-inl.h"

View File

@ -0,0 +1,33 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_TURBOSHAFT_TYPES_INL_H_
#define V8_OBJECTS_TURBOSHAFT_TYPES_INL_H_
#include "src/heap/heap-write-barrier.h"
#include "src/objects/turboshaft-types.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8::internal {
#include "torque-generated/src/objects/turboshaft-types-tq-inl.inc"
// Torque-generated constructor boilerplate for the Turboshaft type heap
// objects declared in src/objects/turboshaft-types.h.
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftType)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord32Type)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord32RangeType)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord32SetType)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord64Type)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord64RangeType)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftWord64SetType)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftFloat64Type)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftFloat64RangeType)
TQ_OBJECT_CONSTRUCTORS_IMPL(TurboshaftFloat64SetType)
} // namespace v8::internal
#include "src/objects/object-macros-undef.h"
#endif // V8_OBJECTS_TURBOSHAFT_TYPES_INL_H_

View File

@ -0,0 +1,110 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_TURBOSHAFT_TYPES_H_
#define V8_OBJECTS_TURBOSHAFT_TYPES_H_
#include "src/common/globals.h"
#include "src/objects/heap-object.h"
#include "torque-generated/bit-fields.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8::internal {
#include "torque-generated/src/objects/turboshaft-types-tq.inc"
// Heap-object representations of Turboshaft compiler types. The field
// layouts and accessors are generated by Torque from
// src/objects/turboshaft-types.tq; these classes only add the standard
// constructor boilerplate and, where the Torque class has a
// @generateBodyDescriptor annotation, a BodyDescriptor declaration.
// Abstract base class of all Turboshaft type objects.
class TurboshaftType
    : public TorqueGeneratedTurboshaftType<TurboshaftType, HeapObject> {
 public:
  TQ_OBJECT_CONSTRUCTORS(TurboshaftType)
};
// Base class for 32-bit word types.
class TurboshaftWord32Type
    : public TorqueGeneratedTurboshaftWord32Type<TurboshaftWord32Type,
                                                 TurboshaftType> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftWord32Type)
};
// A (possibly wrapping) [from, to] interval of uint32 values.
class TurboshaftWord32RangeType
    : public TorqueGeneratedTurboshaftWord32RangeType<TurboshaftWord32RangeType,
                                                      TurboshaftWord32Type> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftWord32RangeType)
};
// A finite set of uint32 values.
class TurboshaftWord32SetType
    : public TorqueGeneratedTurboshaftWord32SetType<TurboshaftWord32SetType,
                                                    TurboshaftWord32Type> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftWord32SetType)
};
// Base class for 64-bit word types.
class TurboshaftWord64Type
    : public TorqueGeneratedTurboshaftWord64Type<TurboshaftWord64Type,
                                                 TurboshaftType> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftWord64Type)
};
// A (possibly wrapping) [from, to] interval of uint64 values, stored as
// separate high/low 32-bit halves (see turboshaft-types.tq).
class TurboshaftWord64RangeType
    : public TorqueGeneratedTurboshaftWord64RangeType<TurboshaftWord64RangeType,
                                                      TurboshaftWord64Type> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftWord64RangeType)
};
// A finite set of uint64 values, stored as high/low 32-bit halves.
class TurboshaftWord64SetType
    : public TorqueGeneratedTurboshaftWord64SetType<TurboshaftWord64SetType,
                                                    TurboshaftWord64Type> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftWord64SetType)
};
// Base class for float64 types; carries the has_nan bit (NaN is tracked
// separately from the range/set payload).
class TurboshaftFloat64Type
    : public TorqueGeneratedTurboshaftFloat64Type<TurboshaftFloat64Type,
                                                  TurboshaftType> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftFloat64Type)
};
// A [min, max] interval of float64 values.
class TurboshaftFloat64RangeType
    : public TorqueGeneratedTurboshaftFloat64RangeType<
          TurboshaftFloat64RangeType, TurboshaftFloat64Type> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftFloat64RangeType)
};
// A finite set of float64 values.
class TurboshaftFloat64SetType
    : public TorqueGeneratedTurboshaftFloat64SetType<TurboshaftFloat64SetType,
                                                     TurboshaftFloat64Type> {
 public:
  class BodyDescriptor;
  TQ_OBJECT_CONSTRUCTORS(TurboshaftFloat64SetType)
};
}  // namespace v8::internal
#include "src/objects/object-macros-undef.h"
#endif  // V8_OBJECTS_TURBOSHAFT_TYPES_H_

View File

@ -0,0 +1,227 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/objects/turboshaft-types.h"
// Heap-object encodings of Turboshaft compiler types. Word64 values are
// split into separate high/low uint32 fields.
@abstract
extern class TurboshaftType extends HeapObject {
}
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftWord32Type extends TurboshaftType {
}
// Interval [from, to] of uint32 values; from > to encodes a range that
// wraps around the uint32 boundary (see TestTurboshaftWord32Type).
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftWord32RangeType extends TurboshaftWord32Type {
  from: uint32;
  to: uint32;
}
// Finite set of uint32 values, stored as an inline array of set_size
// elements.
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftWord32SetType extends TurboshaftWord32Type {
  const set_size: uint32;
  elements[set_size]: uint32;
}
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftWord64Type extends TurboshaftType {
}
// Interval of uint64 values; each bound is stored as high/low uint32
// halves.
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftWord64RangeType extends TurboshaftWord64Type {
  from_high: uint32;
  from_low: uint32;
  to_high: uint32;
  to_low: uint32;
}
// Finite set of uint64 values; element i is the pair
// (elements_high[i], elements_low[i]).
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftWord64SetType extends TurboshaftWord64Type {
  const set_size: uint32;
  elements_high[set_size]: uint32;
  elements_low[set_size]: uint32;
}
// has_nan != 0 means NaN is included in the type, independent of the
// range/set payload of the subclasses.
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftFloat64Type extends TurboshaftType {
  has_nan: uint32;
}
// Interval [min, max] of float64 values.
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftFloat64RangeType extends TurboshaftFloat64Type {
  // _padding: presumably keeps the following float64 fields 8-byte
  // aligned — TODO confirm.
  _padding: uint32;
  min: float64;
  max: float64;
}
// Finite set of float64 values.
@generateBodyDescriptor
@generateUniqueMap
@generateFactoryFunction
extern class TurboshaftFloat64SetType extends TurboshaftFloat64Type {
  const set_size: uint32;
  elements[set_size]: float64;
}
// Returns true iff {value} is contained in the Word32 type {expected}.
// {expected} must be a concrete range or set; the abstract base type is
// never instantiated.
macro TestTurboshaftWord32Type(
    value: uint32, expected: TurboshaftWord32Type): bool {
  typeswitch (expected) {
    case (range: TurboshaftWord32RangeType): {
      // from <= to is an ordinary interval; from > to encodes a range that
      // wraps around the uint32 boundary.
      if (range.from <= range.to) {
        return range.from <= value && value <= range.to;
      }
      return value <= range.to || range.from <= value;
    }
    case (set: TurboshaftWord32SetType): {
      for (let idx: uint32 = 0; idx < set.set_size; ++idx) {
        if (set.elements[idx] == value) return true;
      }
      return false;
    }
    case (TurboshaftWord32Type): {
      unreachable;
    }
  }
}
// Three-way comparison of two uint64 values given as high/low uint32
// halves. Returns -1, 0 or 1 for lhs <, ==, > rhs respectively.
macro CompareUint64HighLow(
    lhsHigh: uint32, lhsLow: uint32, rhsHigh: uint32, rhsLow: uint32): int32 {
  // The high words dominate; only on a tie do the low words decide.
  if (lhsHigh != rhsHigh) {
    return lhsHigh < rhsHigh ? Convert<int32>(-1) : 1;
  }
  if (lhsLow == rhsLow) return 0;
  return lhsLow < rhsLow ? Convert<int32>(-1) : 1;
}
// Returns true iff the uint64 value (given as high/low halves) is contained
// in the Word64 type {expected}.
macro TestTurboshaftWord64Type(
    valueHigh: uint32, valueLow: uint32, expected: TurboshaftWord64Type): bool {
  typeswitch (expected) {
    case (range: TurboshaftWord64RangeType): {
      const greaterThanOrEqualFrom =
          CompareUint64HighLow(
              valueHigh, valueLow, range.from_high, range.from_low) >= 0;
      const lessThanOrEqualTo =
          CompareUint64HighLow(
              valueHigh, valueLow, range.to_high, range.to_low) <= 0;
      // A wrapping range has from > to and covers [from, uint64_max] union
      // [0, to], so either bound check suffices. This mirrors the from > to
      // wrapping convention of TestTurboshaftWord32Type; the previous `< 0`
      // comparison had the wrapping test inverted.
      const isWrapping =
          CompareUint64HighLow(
              range.from_high, range.from_low, range.to_high, range.to_low) > 0;
      return (isWrapping && (greaterThanOrEqualFrom || lessThanOrEqualTo)) ||
          (greaterThanOrEqualFrom && lessThanOrEqualTo);
    }
    case (set: TurboshaftWord64SetType): {
      for (let i: uint32 = 0; i < set.set_size; ++i) {
        if (CompareUint64HighLow(
                set.elements_high[i], set.elements_low[i], valueHigh,
                valueLow) == 0) {
          return true;
        }
      }
      return false;
    }
    case (TurboshaftWord64Type): {
      unreachable;
    }
  }
}
// Returns true iff {value} is contained in the Float64 type {expected}.
// NaN membership is tracked solely by the has_nan bit and never compared
// against the range/set payload.
macro TestTurboshaftFloat64Type(
    value: float64, expected: TurboshaftFloat64Type): bool {
  if (Float64IsNaN(value)) return expected.has_nan != 0;
  typeswitch (expected) {
    case (range: TurboshaftFloat64RangeType): {
      return range.min <= value && value <= range.max;
    }
    case (set: TurboshaftFloat64SetType): {
      // Set membership uses a small absolute tolerance rather than exact
      // equality — presumably to absorb rounding differences between the
      // typed value and the observed one; TODO confirm the intended
      // precision.
      const delta = 0.000001;
      for (let i: uint32 = 0; i < set.set_size; ++i) {
        if (set.elements[i] - delta <= value &&
            value <= set.elements[i] + delta)
          return true;
      }
      return false;
    }
    case (TurboshaftFloat64Type): {
      unreachable;
    }
  }
}
// Type-assertion builtin: aborts with a diagnostic dump if {value} is not
// contained in {expectedType}; otherwise returns Undefined.
builtin CheckTurboshaftWord32Type(implicit context: Context)(
    value: uint32, expectedType: TurboshaftWord32Type, nodeId: Smi): Undefined {
  if (!TestTurboshaftWord32Type(value, expectedType)) {
    Print('Type assertion failed!');
    Print('Node id', nodeId);
    Print('Actual value', Convert<Number>(value));
    Print('Expected type', expectedType);
    unreachable;
  }
  return Undefined;
}
// Type-assertion builtin for Word64 values (passed as high/low uint32
// halves): aborts with a diagnostic dump if the value is not contained in
// {expectedType}; otherwise returns Undefined.
builtin CheckTurboshaftWord64Type(implicit context: Context)(
    valueHigh: uint32, valueLow: uint32, expectedType: TurboshaftWord64Type,
    nodeId: Smi): Undefined {
  if (TestTurboshaftWord64Type(valueHigh, valueLow, expectedType)) {
    return Undefined;
  }
  Print('Type assertion failed!');
  Print('Node id', nodeId);
  Print('Actual value (high)', Convert<Number>(valueHigh));
  // Fixed typo in the diagnostic output: 'vlaue' -> 'value'.
  Print('Actual value (low)', Convert<Number>(valueLow));
  Print('Expected type', expectedType);
  unreachable;
}
// Builtin needs custom interface descriptor to allow float32 argument type.
// Widens the float32 to float64 and delegates to the shared Float64 test;
// aborts with a diagnostic dump on mismatch, otherwise returns Undefined.
@customInterfaceDescriptor
builtin CheckTurboshaftFloat32Type(implicit context: Context)(
    value: float32, expectedType: TurboshaftFloat64Type, nodeId: Smi):
    Undefined {
  const widened = Convert<float64>(value);
  if (!TestTurboshaftFloat64Type(widened, expectedType)) {
    Print('Type assertion failed!');
    Print('Node id', nodeId);
    Print('Actual value', Convert<Number>(widened));
    Print('Expected type', expectedType);
    unreachable;
  }
  return Undefined;
}
// Builtin needs custom interface descriptor to allow float64 argument type.
// Type-assertion builtin: aborts with a diagnostic dump if {value} is not
// contained in {expectedType}; otherwise returns Undefined.
@customInterfaceDescriptor
builtin CheckTurboshaftFloat64Type(implicit context: Context)(
    value: float64, expectedType: TurboshaftFloat64Type, nodeId: Smi):
    Undefined {
  if (!TestTurboshaftFloat64Type(value, expectedType)) {
    Print('Type assertion failed!');
    Print('Node id', nodeId);
    Print('Actual value', Convert<Number>(value));
    Print('Expected type', expectedType);
    unreachable;
  }
  return Undefined;
}

View File

@ -1080,10 +1080,13 @@ struct TorqueBuiltinDeclaration : BuiltinDeclaration {
bool javascript_linkage, Identifier* name,
ParameterList parameters,
TypeExpression* return_type,
bool has_custom_interface_descriptor,
base::Optional<Statement*> body)
: BuiltinDeclaration(kKind, pos, javascript_linkage, transitioning, name,
std::move(parameters), return_type),
has_custom_interface_descriptor(has_custom_interface_descriptor),
body(body) {}
bool has_custom_interface_descriptor;
base::Optional<Statement*> body;
};

View File

@ -124,6 +124,9 @@ static const char* const ANNOTATION_CPP_RELEASE_STORE = "@cppReleaseStore";
static const char* const ANNOTATION_CPP_ACQUIRE_LOAD = "@cppAcquireLoad";
// Generate BodyDescriptor using IterateCustomWeakPointers.
static const char* const ANNOTATION_CUSTOM_WEAK_MARKING = "@customWeakMarking";
// Do not generate an interface descriptor for this builtin.
static const char* const ANNOTATION_CUSTOM_INTERFACE_DESCRIPTOR =
"@customInterfaceDescriptor";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==

View File

@ -498,22 +498,30 @@ class Method : public TorqueMacro {
class Builtin : public Callable {
public:
enum Kind { kStub, kFixedArgsJavaScript, kVarArgsJavaScript };
enum class Flag { kNone = 0, kCustomInterfaceDescriptor = 1 << 0 };
using Flags = base::Flags<Flag>;
DECLARE_DECLARABLE_BOILERPLATE(Builtin, builtin)
Kind kind() const { return kind_; }
Flags flags() const { return flags_; }
bool IsStub() const { return kind_ == kStub; }
bool IsVarArgsJavaScript() const { return kind_ == kVarArgsJavaScript; }
bool IsFixedArgsJavaScript() const { return kind_ == kFixedArgsJavaScript; }
bool HasCustomInterfaceDescriptor() const {
return flags_ & Flag::kCustomInterfaceDescriptor;
}
private:
friend class Declarations;
Builtin(std::string external_name, std::string readable_name,
Builtin::Kind kind, const Signature& signature,
Builtin::Kind kind, Flags flags, const Signature& signature,
base::Optional<Statement*> body)
: Callable(Declarable::kBuiltin, std::move(external_name),
std::move(readable_name), signature, body),
kind_(kind) {}
kind_(kind),
flags_(flags) {}
Kind kind_;
Flags flags_;
};
class RuntimeFunction : public Callable {

View File

@ -68,6 +68,12 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Builtin::Kind kind = !javascript ? Builtin::kStub
: varargs ? Builtin::kVarArgsJavaScript
: Builtin::kFixedArgsJavaScript;
bool has_custom_interface_descriptor = false;
if (decl->kind == AstNode::Kind::kTorqueBuiltinDeclaration) {
has_custom_interface_descriptor =
static_cast<TorqueBuiltinDeclaration*>(decl)
->has_custom_interface_descriptor;
}
if (varargs && !javascript) {
Error("Rest parameters require ", decl->name,
@ -92,11 +98,24 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
}
for (size_t i = 0; i < signature.types().size(); ++i) {
if (signature.types()[i]->StructSupertype()) {
const Type* parameter_type = signature.types()[i];
if (parameter_type->StructSupertype()) {
Error("Builtin do not support structs as arguments, but argument ",
signature.parameter_names[i], " has type ", *signature.types()[i],
".");
}
if (parameter_type->IsFloat32() || parameter_type->IsFloat64()) {
if (!has_custom_interface_descriptor) {
Error("Builtin ", external_name,
" needs a custom interface descriptor, "
"because it uses type ",
*parameter_type, " for argument ", signature.parameter_names[i],
". One reason being "
"that the default descriptor defines xmm0 to be the first "
"floating point argument register, which is current used as "
"scratch on ia32 and cannot be allocated.");
}
}
}
if (signature.return_type->StructSupertype() && javascript) {
@ -110,9 +129,12 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Error("Builtins cannot have return type void.");
}
Builtin* builtin = Declarations::CreateBuiltin(std::move(external_name),
std::move(readable_name), kind,
std::move(signature), body);
Builtin::Flags flags = Builtin::Flag::kNone;
if (has_custom_interface_descriptor)
flags |= Builtin::Flag::kCustomInterfaceDescriptor;
Builtin* builtin = Declarations::CreateBuiltin(
std::move(external_name), std::move(readable_name), kind, flags,
std::move(signature), body);
// TODO(v8:12261): Recheck this.
// builtin->SetIdentifierPosition(decl->name->pos);
return builtin;

View File

@ -256,21 +256,20 @@ Intrinsic* Declarations::DeclareIntrinsic(const std::string& name,
Builtin* Declarations::CreateBuiltin(std::string external_name,
std::string readable_name,
Builtin::Kind kind, Signature signature,
Builtin::Kind kind, Builtin::Flags flags,
Signature signature,
base::Optional<Statement*> body) {
return RegisterDeclarable(std::unique_ptr<Builtin>(
new Builtin(std::move(external_name), std::move(readable_name), kind,
std::move(signature), body)));
flags, std::move(signature), body)));
}
Builtin* Declarations::DeclareBuiltin(const std::string& name,
Builtin::Kind kind,
Builtin::Kind kind, Builtin::Flags flags,
const Signature& signature,
base::Optional<Statement*> body) {
CheckAlreadyDeclared<Builtin>(name, "builtin");
return Declare(name, CreateBuiltin(name, name, kind, signature, body));
return Declare(name, CreateBuiltin(name, name, kind, flags, signature, body));
}
RuntimeFunction* Declarations::DeclareRuntimeFunction(

View File

@ -123,9 +123,10 @@ class Declarations {
static Builtin* CreateBuiltin(std::string external_name,
std::string readable_name, Builtin::Kind kind,
Signature signature,
Builtin::Flags flags, Signature signature,
base::Optional<Statement*> body);
static Builtin* DeclareBuiltin(const std::string& name, Builtin::Kind kind,
Builtin::Flags flags,
const Signature& signature,
base::Optional<Statement*> body);

View File

@ -3579,43 +3579,47 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
if (builtin->IsStub()) {
builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
<< builtin->ExternalName();
std::string descriptor_name = builtin->ExternalName() + "Descriptor";
bool has_context_parameter = builtin->signature().HasContextParameter();
size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
TypeVector return_types = LowerType(builtin->signature().return_type);
if (!builtin->HasCustomInterfaceDescriptor()) {
std::string descriptor_name = builtin->ExternalName() + "Descriptor";
bool has_context_parameter =
builtin->signature().HasContextParameter();
size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
TypeVector return_types = LowerType(builtin->signature().return_type);
interface_descriptors << "class " << descriptor_name
<< " : public StaticCallInterfaceDescriptor<"
<< descriptor_name << "> {\n";
interface_descriptors << "class " << descriptor_name
<< " : public StaticCallInterfaceDescriptor<"
<< descriptor_name << "> {\n";
interface_descriptors << " public:\n";
interface_descriptors << " public:\n";
if (has_context_parameter) {
interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS(";
} else {
interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
if (has_context_parameter) {
interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS(";
} else {
interface_descriptors
<< " DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
}
interface_descriptors << return_types.size();
for (size_t i = kFirstNonContextParameter;
i < builtin->parameter_names().size(); ++i) {
Identifier* parameter = builtin->parameter_names()[i];
interface_descriptors << ", k" << CamelifyString(parameter->value);
}
interface_descriptors << ")\n";
interface_descriptors << " DEFINE_RESULT_AND_PARAMETER_TYPES(";
PrintCommaSeparatedList(interface_descriptors, return_types,
MachineTypeString);
for (size_t i = kFirstNonContextParameter;
i < builtin->parameter_names().size(); ++i) {
const Type* type = builtin->signature().parameter_types.types[i];
interface_descriptors << ", " << MachineTypeString(type);
}
interface_descriptors << ")\n";
interface_descriptors << " DECLARE_DEFAULT_DESCRIPTOR("
<< descriptor_name << ")\n";
interface_descriptors << "};\n\n";
}
interface_descriptors << return_types.size();
for (size_t i = kFirstNonContextParameter;
i < builtin->parameter_names().size(); ++i) {
Identifier* parameter = builtin->parameter_names()[i];
interface_descriptors << ", k" << CamelifyString(parameter->value);
}
interface_descriptors << ")\n";
interface_descriptors << " DEFINE_RESULT_AND_PARAMETER_TYPES(";
PrintCommaSeparatedList(interface_descriptors, return_types,
MachineTypeString);
for (size_t i = kFirstNonContextParameter;
i < builtin->parameter_names().size(); ++i) {
const Type* type = builtin->signature().parameter_types.types[i];
interface_descriptors << ", " << MachineTypeString(type);
}
interface_descriptors << ")\n";
interface_descriptors << " DECLARE_DEFAULT_DESCRIPTOR("
<< descriptor_name << ")\n";
interface_descriptors << "};\n\n";
} else {
builtin_definitions << "TFJ(" << builtin->ExternalName();
if (builtin->IsVarArgsJavaScript()) {

View File

@ -664,6 +664,8 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
ParseResultIterator* child_results) {
const bool has_custom_interface_descriptor = HasAnnotation(
child_results, ANNOTATION_CUSTOM_INTERFACE_DESCRIPTOR, "builtin");
auto transitioning = child_results->NextAs<bool>();
auto javascript_linkage = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
@ -678,7 +680,8 @@ base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
auto return_type = child_results->NextAs<TypeExpression*>();
auto body = child_results->NextAs<base::Optional<Statement*>>();
CallableDeclaration* declaration = MakeNode<TorqueBuiltinDeclaration>(
transitioning, javascript_linkage, name, args, return_type, body);
transitioning, javascript_linkage, name, args, return_type,
has_custom_interface_descriptor, body);
Declaration* result = declaration;
if (generic_parameters.empty()) {
if (!body) ReportError("A non-generic declaration needs a body.");
@ -2833,8 +2836,8 @@ struct TorqueGrammar : Grammar {
&parameterListNoVararg, &returnType, optionalLabelList,
&optionalBody},
AsSingletonVector<Declaration*, MakeTorqueMacroDeclaration>()),
Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
Token("builtin"), &name,
Rule({annotations, CheckIf(Token("transitioning")),
CheckIf(Token("javascript")), Token("builtin"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListAllowVararg, &returnType, &optionalBody},
AsSingletonVector<Declaration*, MakeTorqueBuiltinDeclaration>()),

View File

@ -127,6 +127,8 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
return IsAbstractName(CONSTEXPR_BOOL_TYPE_STRING);
}
bool IsVoidOrNever() const { return IsVoid() || IsNever(); }
bool IsFloat32() const { return IsAbstractName(FLOAT32_TYPE_STRING); }
bool IsFloat64() const { return IsAbstractName(FLOAT64_TYPE_STRING); }
std::string GetGeneratedTypeName() const;
std::string GetGeneratedTNodeTypeName() const;
virtual bool IsConstexpr() const {

View File

@ -126,15 +126,20 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
# The conflicts might be directly contradictory flags or be caused by the
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--concurrent-recompilation": ["--predictable", "--assert-types"],
"--parallel-compile-tasks-for-eager-toplevel": ["--predictable"],
"--parallel-compile-tasks-for-lazy": ["--predictable"],
"--gc-interval=*": ["--gc-interval=*"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation":
"--concurrent-recompilation": [
"--predictable", "--assert-types", "--turboshaft-assert-types"
],
"--parallel-compile-tasks-for-eager-toplevel": ["--predictable"],
"--parallel-compile-tasks-for-lazy": ["--predictable"],
"--gc-interval=*": ["--gc-interval=*"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_allocation"],
"--stress-concurrent-inlining":
"--stress-concurrent-inlining":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_inlining"],
"--turboshaft-assert-types": [
"--concurrent-recompilation", "--stress-concurrent-inlining"
],
}
SLOW_VARIANTS = set([

View File

@ -103,15 +103,15 @@ INSTANCE_TYPES = {
192: "FIXED_DOUBLE_ARRAY_TYPE",
193: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
194: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
195: "TURBOFAN_BITSET_TYPE_TYPE",
196: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
197: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
198: "TURBOFAN_RANGE_TYPE_TYPE",
199: "TURBOFAN_UNION_TYPE_TYPE",
200: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
201: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
202: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
203: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
195: "TURBOSHAFT_FLOAT64_TYPE_TYPE",
196: "TURBOSHAFT_FLOAT64_RANGE_TYPE_TYPE",
197: "TURBOSHAFT_FLOAT64_SET_TYPE_TYPE",
198: "TURBOSHAFT_WORD32_TYPE_TYPE",
199: "TURBOSHAFT_WORD32_RANGE_TYPE_TYPE",
200: "TURBOSHAFT_WORD32_SET_TYPE_TYPE",
201: "TURBOSHAFT_WORD64_TYPE_TYPE",
202: "TURBOSHAFT_WORD64_RANGE_TYPE_TYPE",
203: "TURBOSHAFT_WORD64_SET_TYPE_TYPE",
204: "FOREIGN_TYPE",
205: "AWAIT_CONTEXT_TYPE",
206: "BLOCK_CONTEXT_TYPE",
@ -123,64 +123,73 @@ INSTANCE_TYPES = {
212: "NATIVE_CONTEXT_TYPE",
213: "SCRIPT_CONTEXT_TYPE",
214: "WITH_CONTEXT_TYPE",
215: "WASM_FUNCTION_DATA_TYPE",
216: "WASM_CAPI_FUNCTION_DATA_TYPE",
217: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
218: "WASM_JS_FUNCTION_DATA_TYPE",
219: "EXPORTED_SUB_CLASS_BASE_TYPE",
220: "EXPORTED_SUB_CLASS_TYPE",
221: "EXPORTED_SUB_CLASS2_TYPE",
222: "SMALL_ORDERED_HASH_MAP_TYPE",
223: "SMALL_ORDERED_HASH_SET_TYPE",
224: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
225: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
226: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
227: "DESCRIPTOR_ARRAY_TYPE",
228: "STRONG_DESCRIPTOR_ARRAY_TYPE",
229: "SOURCE_TEXT_MODULE_TYPE",
230: "SYNTHETIC_MODULE_TYPE",
231: "WEAK_FIXED_ARRAY_TYPE",
232: "TRANSITION_ARRAY_TYPE",
233: "ACCESSOR_INFO_TYPE",
234: "CALL_HANDLER_INFO_TYPE",
235: "CELL_TYPE",
236: "CODE_TYPE",
237: "CODE_DATA_CONTAINER_TYPE",
238: "COVERAGE_INFO_TYPE",
239: "EMBEDDER_DATA_ARRAY_TYPE",
240: "FEEDBACK_METADATA_TYPE",
241: "FEEDBACK_VECTOR_TYPE",
242: "FILLER_TYPE",
243: "FREE_SPACE_TYPE",
244: "INTERNAL_CLASS_TYPE",
245: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
246: "MAP_TYPE",
247: "MEGA_DOM_HANDLER_TYPE",
248: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
249: "PREPARSE_DATA_TYPE",
250: "PROPERTY_ARRAY_TYPE",
251: "PROPERTY_CELL_TYPE",
252: "SCOPE_INFO_TYPE",
253: "SHARED_FUNCTION_INFO_TYPE",
254: "SMI_BOX_TYPE",
255: "SMI_PAIR_TYPE",
256: "SORT_STATE_TYPE",
257: "SWISS_NAME_DICTIONARY_TYPE",
258: "WASM_API_FUNCTION_REF_TYPE",
259: "WASM_CONTINUATION_OBJECT_TYPE",
260: "WASM_INTERNAL_FUNCTION_TYPE",
261: "WASM_RESUME_DATA_TYPE",
262: "WASM_STRING_VIEW_ITER_TYPE",
263: "WASM_TYPE_INFO_TYPE",
264: "WEAK_ARRAY_LIST_TYPE",
265: "WEAK_CELL_TYPE",
266: "WASM_ARRAY_TYPE",
267: "WASM_STRUCT_TYPE",
268: "JS_PROXY_TYPE",
215: "TURBOFAN_BITSET_TYPE_TYPE",
216: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
217: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
218: "TURBOFAN_RANGE_TYPE_TYPE",
219: "TURBOFAN_UNION_TYPE_TYPE",
220: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
221: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
222: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
223: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
224: "WASM_FUNCTION_DATA_TYPE",
225: "WASM_CAPI_FUNCTION_DATA_TYPE",
226: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
227: "WASM_JS_FUNCTION_DATA_TYPE",
228: "EXPORTED_SUB_CLASS_BASE_TYPE",
229: "EXPORTED_SUB_CLASS_TYPE",
230: "EXPORTED_SUB_CLASS2_TYPE",
231: "SMALL_ORDERED_HASH_MAP_TYPE",
232: "SMALL_ORDERED_HASH_SET_TYPE",
233: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
234: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
235: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
236: "DESCRIPTOR_ARRAY_TYPE",
237: "STRONG_DESCRIPTOR_ARRAY_TYPE",
238: "SOURCE_TEXT_MODULE_TYPE",
239: "SYNTHETIC_MODULE_TYPE",
240: "WEAK_FIXED_ARRAY_TYPE",
241: "TRANSITION_ARRAY_TYPE",
242: "ACCESSOR_INFO_TYPE",
243: "CALL_HANDLER_INFO_TYPE",
244: "CELL_TYPE",
245: "CODE_TYPE",
246: "CODE_DATA_CONTAINER_TYPE",
247: "COVERAGE_INFO_TYPE",
248: "EMBEDDER_DATA_ARRAY_TYPE",
249: "FEEDBACK_METADATA_TYPE",
250: "FEEDBACK_VECTOR_TYPE",
251: "FILLER_TYPE",
252: "FREE_SPACE_TYPE",
253: "INTERNAL_CLASS_TYPE",
254: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
255: "MAP_TYPE",
256: "MEGA_DOM_HANDLER_TYPE",
257: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
258: "PREPARSE_DATA_TYPE",
259: "PROPERTY_ARRAY_TYPE",
260: "PROPERTY_CELL_TYPE",
261: "SCOPE_INFO_TYPE",
262: "SHARED_FUNCTION_INFO_TYPE",
263: "SMI_BOX_TYPE",
264: "SMI_PAIR_TYPE",
265: "SORT_STATE_TYPE",
266: "SWISS_NAME_DICTIONARY_TYPE",
267: "WASM_API_FUNCTION_REF_TYPE",
268: "WASM_CONTINUATION_OBJECT_TYPE",
269: "WASM_INTERNAL_FUNCTION_TYPE",
270: "WASM_RESUME_DATA_TYPE",
271: "WASM_STRING_VIEW_ITER_TYPE",
272: "WASM_TYPE_INFO_TYPE",
273: "WEAK_ARRAY_LIST_TYPE",
274: "WEAK_CELL_TYPE",
275: "WASM_ARRAY_TYPE",
276: "WASM_STRUCT_TYPE",
277: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
269: "JS_GLOBAL_OBJECT_TYPE",
270: "JS_GLOBAL_PROXY_TYPE",
271: "JS_MODULE_NAMESPACE_TYPE",
278: "JS_GLOBAL_OBJECT_TYPE",
279: "JS_GLOBAL_PROXY_TYPE",
280: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
@ -285,16 +294,16 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
("read_only_space", 0x02141): (246, "MetaMap"),
("read_only_space", 0x02141): (255, "MetaMap"),
("read_only_space", 0x02169): (131, "NullMap"),
("read_only_space", 0x02191): (228, "StrongDescriptorArrayMap"),
("read_only_space", 0x021b9): (264, "WeakArrayListMap"),
("read_only_space", 0x02191): (237, "StrongDescriptorArrayMap"),
("read_only_space", 0x021b9): (273, "WeakArrayListMap"),
("read_only_space", 0x021fd): (154, "EnumCacheMap"),
("read_only_space", 0x02231): (175, "FixedArrayMap"),
("read_only_space", 0x0227d): (8, "OneByteInternalizedStringMap"),
("read_only_space", 0x022c9): (243, "FreeSpaceMap"),
("read_only_space", 0x022f1): (242, "OnePointerFillerMap"),
("read_only_space", 0x02319): (242, "TwoPointerFillerMap"),
("read_only_space", 0x022c9): (252, "FreeSpaceMap"),
("read_only_space", 0x022f1): (251, "OnePointerFillerMap"),
("read_only_space", 0x02319): (251, "TwoPointerFillerMap"),
("read_only_space", 0x02341): (131, "UninitializedMap"),
("read_only_space", 0x023b9): (131, "UndefinedMap"),
("read_only_space", 0x023fd): (130, "HeapNumberMap"),
@ -305,15 +314,15 @@ KNOWN_MAPS = {
("read_only_space", 0x02585): (176, "HashTableMap"),
("read_only_space", 0x025ad): (128, "SymbolMap"),
("read_only_space", 0x025d5): (40, "OneByteStringMap"),
("read_only_space", 0x025fd): (252, "ScopeInfoMap"),
("read_only_space", 0x02625): (253, "SharedFunctionInfoMap"),
("read_only_space", 0x0264d): (236, "CodeMap"),
("read_only_space", 0x02675): (235, "CellMap"),
("read_only_space", 0x0269d): (251, "GlobalPropertyCellMap"),
("read_only_space", 0x025fd): (261, "ScopeInfoMap"),
("read_only_space", 0x02625): (262, "SharedFunctionInfoMap"),
("read_only_space", 0x0264d): (245, "CodeMap"),
("read_only_space", 0x02675): (244, "CellMap"),
("read_only_space", 0x0269d): (260, "GlobalPropertyCellMap"),
("read_only_space", 0x026c5): (204, "ForeignMap"),
("read_only_space", 0x026ed): (232, "TransitionArrayMap"),
("read_only_space", 0x026ed): (241, "TransitionArrayMap"),
("read_only_space", 0x02715): (45, "ThinOneByteStringMap"),
("read_only_space", 0x0273d): (241, "FeedbackVectorMap"),
("read_only_space", 0x0273d): (250, "FeedbackVectorMap"),
("read_only_space", 0x02775): (131, "ArgumentsMarkerMap"),
("read_only_space", 0x027d5): (131, "ExceptionMap"),
("read_only_space", 0x02831): (131, "TerminationExceptionMap"),
@ -321,17 +330,17 @@ KNOWN_MAPS = {
("read_only_space", 0x028f9): (131, "StaleRegisterMap"),
("read_only_space", 0x02959): (189, "ScriptContextTableMap"),
("read_only_space", 0x02981): (187, "ClosureFeedbackCellArrayMap"),
("read_only_space", 0x029a9): (240, "FeedbackMetadataArrayMap"),
("read_only_space", 0x029a9): (249, "FeedbackMetadataArrayMap"),
("read_only_space", 0x029d1): (175, "ArrayListMap"),
("read_only_space", 0x029f9): (129, "BigIntMap"),
("read_only_space", 0x02a21): (188, "ObjectBoilerplateDescriptionMap"),
("read_only_space", 0x02a49): (191, "BytecodeArrayMap"),
("read_only_space", 0x02a71): (237, "CodeDataContainerMap"),
("read_only_space", 0x02a99): (238, "CoverageInfoMap"),
("read_only_space", 0x02a71): (246, "CodeDataContainerMap"),
("read_only_space", 0x02a99): (247, "CoverageInfoMap"),
("read_only_space", 0x02ac1): (192, "FixedDoubleArrayMap"),
("read_only_space", 0x02ae9): (178, "GlobalDictionaryMap"),
("read_only_space", 0x02b11): (156, "ManyClosuresCellMap"),
("read_only_space", 0x02b39): (247, "MegaDomHandlerMap"),
("read_only_space", 0x02b39): (256, "MegaDomHandlerMap"),
("read_only_space", 0x02b61): (175, "ModuleInfoMap"),
("read_only_space", 0x02b89): (179, "NameDictionaryMap"),
("read_only_space", 0x02bb1): (156, "NoClosuresCellMap"),
@ -342,31 +351,31 @@ KNOWN_MAPS = {
("read_only_space", 0x02c79): (180, "NameToIndexHashTableMap"),
("read_only_space", 0x02ca1): (185, "RegisteredSymbolTableMap"),
("read_only_space", 0x02cc9): (184, "OrderedNameDictionaryMap"),
("read_only_space", 0x02cf1): (249, "PreparseDataMap"),
("read_only_space", 0x02d19): (250, "PropertyArrayMap"),
("read_only_space", 0x02d41): (233, "AccessorInfoMap"),
("read_only_space", 0x02d69): (234, "SideEffectCallHandlerInfoMap"),
("read_only_space", 0x02d91): (234, "SideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02db9): (234, "NextCallSideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02cf1): (258, "PreparseDataMap"),
("read_only_space", 0x02d19): (259, "PropertyArrayMap"),
("read_only_space", 0x02d41): (242, "AccessorInfoMap"),
("read_only_space", 0x02d69): (243, "SideEffectCallHandlerInfoMap"),
("read_only_space", 0x02d91): (243, "SideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02db9): (243, "NextCallSideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02de1): (186, "SimpleNumberDictionaryMap"),
("read_only_space", 0x02e09): (222, "SmallOrderedHashMapMap"),
("read_only_space", 0x02e31): (223, "SmallOrderedHashSetMap"),
("read_only_space", 0x02e59): (224, "SmallOrderedNameDictionaryMap"),
("read_only_space", 0x02e81): (229, "SourceTextModuleMap"),
("read_only_space", 0x02ea9): (257, "SwissNameDictionaryMap"),
("read_only_space", 0x02ed1): (230, "SyntheticModuleMap"),
("read_only_space", 0x02ef9): (258, "WasmApiFunctionRefMap"),
("read_only_space", 0x02f21): (216, "WasmCapiFunctionDataMap"),
("read_only_space", 0x02f49): (217, "WasmExportedFunctionDataMap"),
("read_only_space", 0x02f71): (260, "WasmInternalFunctionMap"),
("read_only_space", 0x02f99): (218, "WasmJSFunctionDataMap"),
("read_only_space", 0x02fc1): (261, "WasmResumeDataMap"),
("read_only_space", 0x02fe9): (263, "WasmTypeInfoMap"),
("read_only_space", 0x03011): (259, "WasmContinuationObjectMap"),
("read_only_space", 0x03039): (231, "WeakFixedArrayMap"),
("read_only_space", 0x02e09): (231, "SmallOrderedHashMapMap"),
("read_only_space", 0x02e31): (232, "SmallOrderedHashSetMap"),
("read_only_space", 0x02e59): (233, "SmallOrderedNameDictionaryMap"),
("read_only_space", 0x02e81): (238, "SourceTextModuleMap"),
("read_only_space", 0x02ea9): (266, "SwissNameDictionaryMap"),
("read_only_space", 0x02ed1): (239, "SyntheticModuleMap"),
("read_only_space", 0x02ef9): (267, "WasmApiFunctionRefMap"),
("read_only_space", 0x02f21): (225, "WasmCapiFunctionDataMap"),
("read_only_space", 0x02f49): (226, "WasmExportedFunctionDataMap"),
("read_only_space", 0x02f71): (269, "WasmInternalFunctionMap"),
("read_only_space", 0x02f99): (227, "WasmJSFunctionDataMap"),
("read_only_space", 0x02fc1): (270, "WasmResumeDataMap"),
("read_only_space", 0x02fe9): (272, "WasmTypeInfoMap"),
("read_only_space", 0x03011): (268, "WasmContinuationObjectMap"),
("read_only_space", 0x03039): (240, "WeakFixedArrayMap"),
("read_only_space", 0x03061): (177, "EphemeronHashTableMap"),
("read_only_space", 0x03089): (239, "EmbedderDataArrayMap"),
("read_only_space", 0x030b1): (265, "WeakCellMap"),
("read_only_space", 0x03089): (248, "EmbedderDataArrayMap"),
("read_only_space", 0x030b1): (274, "WeakCellMap"),
("read_only_space", 0x030d9): (32, "StringMap"),
("read_only_space", 0x03101): (41, "ConsOneByteStringMap"),
("read_only_space", 0x03129): (33, "ConsStringMap"),
@ -431,38 +440,47 @@ KNOWN_MAPS = {
("read_only_space", 0x07b41): (173, "WasmExceptionTagMap"),
("read_only_space", 0x07b69): (174, "WasmIndirectFunctionTableMap"),
("read_only_space", 0x07b91): (194, "SloppyArgumentsElementsMap"),
("read_only_space", 0x07bb9): (227, "DescriptorArrayMap"),
("read_only_space", 0x07be1): (202, "UncompiledDataWithoutPreparseDataMap"),
("read_only_space", 0x07c09): (200, "UncompiledDataWithPreparseDataMap"),
("read_only_space", 0x07c31): (203, "UncompiledDataWithoutPreparseDataWithJobMap"),
("read_only_space", 0x07c59): (201, "UncompiledDataWithPreparseDataAndJobMap"),
("read_only_space", 0x07c81): (248, "OnHeapBasicBlockProfilerDataMap"),
("read_only_space", 0x07ca9): (195, "TurbofanBitsetTypeMap"),
("read_only_space", 0x07cd1): (199, "TurbofanUnionTypeMap"),
("read_only_space", 0x07cf9): (198, "TurbofanRangeTypeMap"),
("read_only_space", 0x07d21): (196, "TurbofanHeapConstantTypeMap"),
("read_only_space", 0x07d49): (197, "TurbofanOtherNumberConstantTypeMap"),
("read_only_space", 0x07d71): (244, "InternalClassMap"),
("read_only_space", 0x07d99): (255, "SmiPairMap"),
("read_only_space", 0x07dc1): (254, "SmiBoxMap"),
("read_only_space", 0x07de9): (219, "ExportedSubClassBaseMap"),
("read_only_space", 0x07e11): (220, "ExportedSubClassMap"),
("read_only_space", 0x07e39): (225, "AbstractInternalClassSubclass1Map"),
("read_only_space", 0x07e61): (226, "AbstractInternalClassSubclass2Map"),
("read_only_space", 0x07e89): (193, "InternalClassWithSmiElementsMap"),
("read_only_space", 0x07eb1): (245, "InternalClassWithStructElementsMap"),
("read_only_space", 0x07ed9): (221, "ExportedSubClass2Map"),
("read_only_space", 0x07f01): (256, "SortStateMap"),
("read_only_space", 0x07f29): (262, "WasmStringViewIterMap"),
("read_only_space", 0x07f51): (145, "AllocationSiteWithWeakNextMap"),
("read_only_space", 0x07f79): (145, "AllocationSiteWithoutWeakNextMap"),
("read_only_space", 0x08045): (137, "LoadHandler1Map"),
("read_only_space", 0x0806d): (137, "LoadHandler2Map"),
("read_only_space", 0x08095): (137, "LoadHandler3Map"),
("read_only_space", 0x080bd): (138, "StoreHandler0Map"),
("read_only_space", 0x080e5): (138, "StoreHandler1Map"),
("read_only_space", 0x0810d): (138, "StoreHandler2Map"),
("read_only_space", 0x08135): (138, "StoreHandler3Map"),
("read_only_space", 0x07bb9): (236, "DescriptorArrayMap"),
("read_only_space", 0x07be1): (222, "UncompiledDataWithoutPreparseDataMap"),
("read_only_space", 0x07c09): (220, "UncompiledDataWithPreparseDataMap"),
("read_only_space", 0x07c31): (223, "UncompiledDataWithoutPreparseDataWithJobMap"),
("read_only_space", 0x07c59): (221, "UncompiledDataWithPreparseDataAndJobMap"),
("read_only_space", 0x07c81): (257, "OnHeapBasicBlockProfilerDataMap"),
("read_only_space", 0x07ca9): (215, "TurbofanBitsetTypeMap"),
("read_only_space", 0x07cd1): (219, "TurbofanUnionTypeMap"),
("read_only_space", 0x07cf9): (218, "TurbofanRangeTypeMap"),
("read_only_space", 0x07d21): (216, "TurbofanHeapConstantTypeMap"),
("read_only_space", 0x07d49): (217, "TurbofanOtherNumberConstantTypeMap"),
("read_only_space", 0x07d71): (198, "TurboshaftWord32TypeMap"),
("read_only_space", 0x07d99): (199, "TurboshaftWord32RangeTypeMap"),
("read_only_space", 0x07dc1): (200, "TurboshaftWord32SetTypeMap"),
("read_only_space", 0x07de9): (201, "TurboshaftWord64TypeMap"),
("read_only_space", 0x07e11): (202, "TurboshaftWord64RangeTypeMap"),
("read_only_space", 0x07e39): (203, "TurboshaftWord64SetTypeMap"),
("read_only_space", 0x07e61): (195, "TurboshaftFloat64TypeMap"),
("read_only_space", 0x07e89): (196, "TurboshaftFloat64RangeTypeMap"),
("read_only_space", 0x07eb1): (197, "TurboshaftFloat64SetTypeMap"),
("read_only_space", 0x07ed9): (253, "InternalClassMap"),
("read_only_space", 0x07f01): (264, "SmiPairMap"),
("read_only_space", 0x07f29): (263, "SmiBoxMap"),
("read_only_space", 0x07f51): (228, "ExportedSubClassBaseMap"),
("read_only_space", 0x07f79): (229, "ExportedSubClassMap"),
("read_only_space", 0x07fa1): (234, "AbstractInternalClassSubclass1Map"),
("read_only_space", 0x07fc9): (235, "AbstractInternalClassSubclass2Map"),
("read_only_space", 0x07ff1): (193, "InternalClassWithSmiElementsMap"),
("read_only_space", 0x08019): (254, "InternalClassWithStructElementsMap"),
("read_only_space", 0x08041): (230, "ExportedSubClass2Map"),
("read_only_space", 0x08069): (265, "SortStateMap"),
("read_only_space", 0x08091): (271, "WasmStringViewIterMap"),
("read_only_space", 0x080b9): (145, "AllocationSiteWithWeakNextMap"),
("read_only_space", 0x080e1): (145, "AllocationSiteWithoutWeakNextMap"),
("read_only_space", 0x081ad): (137, "LoadHandler1Map"),
("read_only_space", 0x081d5): (137, "LoadHandler2Map"),
("read_only_space", 0x081fd): (137, "LoadHandler3Map"),
("read_only_space", 0x08225): (138, "StoreHandler0Map"),
("read_only_space", 0x0824d): (138, "StoreHandler1Map"),
("read_only_space", 0x08275): (138, "StoreHandler2Map"),
("read_only_space", 0x0829d): (138, "StoreHandler3Map"),
("old_space", 0x0438d): (2116, "ExternalMap"),
("old_space", 0x043b5): (2120, "JSMessageObjectMap"),
}
@ -553,30 +571,30 @@ KNOWN_OBJECTS = {
("old_space", 0x04581): "StringSplitCache",
("old_space", 0x04989): "RegExpMultipleCache",
("old_space", 0x04d91): "BuiltinsConstantsTable",
("old_space", 0x051f9): "AsyncFunctionAwaitRejectSharedFun",
("old_space", 0x0521d): "AsyncFunctionAwaitResolveSharedFun",
("old_space", 0x05241): "AsyncGeneratorAwaitRejectSharedFun",
("old_space", 0x05265): "AsyncGeneratorAwaitResolveSharedFun",
("old_space", 0x05289): "AsyncGeneratorYieldWithAwaitResolveSharedFun",
("old_space", 0x052ad): "AsyncGeneratorReturnResolveSharedFun",
("old_space", 0x052d1): "AsyncGeneratorReturnClosedRejectSharedFun",
("old_space", 0x052f5): "AsyncGeneratorReturnClosedResolveSharedFun",
("old_space", 0x05319): "AsyncIteratorValueUnwrapSharedFun",
("old_space", 0x0533d): "PromiseAllResolveElementSharedFun",
("old_space", 0x05361): "PromiseAllSettledResolveElementSharedFun",
("old_space", 0x05385): "PromiseAllSettledRejectElementSharedFun",
("old_space", 0x053a9): "PromiseAnyRejectElementSharedFun",
("old_space", 0x053cd): "PromiseCapabilityDefaultRejectSharedFun",
("old_space", 0x053f1): "PromiseCapabilityDefaultResolveSharedFun",
("old_space", 0x05415): "PromiseCatchFinallySharedFun",
("old_space", 0x05439): "PromiseGetCapabilitiesExecutorSharedFun",
("old_space", 0x0545d): "PromiseThenFinallySharedFun",
("old_space", 0x05481): "PromiseThrowerFinallySharedFun",
("old_space", 0x054a5): "PromiseValueThunkFinallySharedFun",
("old_space", 0x054c9): "ProxyRevokeSharedFun",
("old_space", 0x054ed): "ShadowRealmImportValueFulfilledSFI",
("old_space", 0x05511): "SourceTextModuleExecuteAsyncModuleFulfilledSFI",
("old_space", 0x05535): "SourceTextModuleExecuteAsyncModuleRejectedSFI",
("old_space", 0x05399): "AsyncFunctionAwaitRejectSharedFun",
("old_space", 0x053bd): "AsyncFunctionAwaitResolveSharedFun",
("old_space", 0x053e1): "AsyncGeneratorAwaitRejectSharedFun",
("old_space", 0x05405): "AsyncGeneratorAwaitResolveSharedFun",
("old_space", 0x05429): "AsyncGeneratorYieldWithAwaitResolveSharedFun",
("old_space", 0x0544d): "AsyncGeneratorReturnResolveSharedFun",
("old_space", 0x05471): "AsyncGeneratorReturnClosedRejectSharedFun",
("old_space", 0x05495): "AsyncGeneratorReturnClosedResolveSharedFun",
("old_space", 0x054b9): "AsyncIteratorValueUnwrapSharedFun",
("old_space", 0x054dd): "PromiseAllResolveElementSharedFun",
("old_space", 0x05501): "PromiseAllSettledResolveElementSharedFun",
("old_space", 0x05525): "PromiseAllSettledRejectElementSharedFun",
("old_space", 0x05549): "PromiseAnyRejectElementSharedFun",
("old_space", 0x0556d): "PromiseCapabilityDefaultRejectSharedFun",
("old_space", 0x05591): "PromiseCapabilityDefaultResolveSharedFun",
("old_space", 0x055b5): "PromiseCatchFinallySharedFun",
("old_space", 0x055d9): "PromiseGetCapabilitiesExecutorSharedFun",
("old_space", 0x055fd): "PromiseThenFinallySharedFun",
("old_space", 0x05621): "PromiseThrowerFinallySharedFun",
("old_space", 0x05645): "PromiseValueThunkFinallySharedFun",
("old_space", 0x05669): "ProxyRevokeSharedFun",
("old_space", 0x0568d): "ShadowRealmImportValueFulfilledSFI",
("old_space", 0x056b1): "SourceTextModuleExecuteAsyncModuleFulfilledSFI",
("old_space", 0x056d5): "SourceTextModuleExecuteAsyncModuleRejectedSFI",
}
# Lower 32 bits of first page addresses for various heap spaces.