Revert "[turbofan] Optimize rab/gsab-backed TypedArrays and DataViews"

This reverts commit bd590292b3.

Reason for revert:
https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Mac64%20GC%20Stress/25069/overview

Original change's description:
> [turbofan] Optimize rab/gsab-backed TypedArrays and DataViews
>
> This CL adds TurboFan optimizations for length and element access
> of TypedArrays and DataViews that are rab/gsab-backed.
>
> To enable this optimization, this CL builds the machinery required
> to allow machine operators at the front of the pipeline (before
> simplified lowering). Some key changes to allow this are:
>  - Introduce Type::Machine() to allow the typer and the verifier to
>    provide a type to those machine operators in parts of the pipeline
>    that require nodes to be typed.
>  - Add EnterMachineGraph and ExitMachineGraph operators that define
>    the boundary between early machine graphs and the normal graph with
>    JS semantics.
>  - Give Branch operators a BranchSemantics parameter to distinguish
>    between machine branches (condition is a machine level value) and
>    JS branches (condition is a JS boolean value) and have phases that
>    handle branches decide on the branch's semantics based on this
>    parameter instead of the position in the pipeline.
>  - Extend SimplifiedLowering and SimplifiedLoweringVerifier to handle
>    machine graphs. In particular, constants required special handling,
>    because they are cached in the graph but they may have uses in both
>    a machine and the JS graph, which prevents consistent typing of
>    them.
>  - Move lots of logic from JSCallReducerAssembler into
>    [JS]GraphAssembler such that functionality can be shared between
>    different phases (e.g. JSNativeContextSpecialization and
>    JSCallReducer need to generate logic to compute a TypedArray's
>    byte length). Extend the assembler interface in general with
>    additional TNode<> overloads.
>
> Bug: v8:11111, chromium:1358505
> Change-Id: Ife006b8c38a83045cd3b8558acbfdcb66408891f
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3898690
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83881}

Bug: v8:11111, chromium:1358505
Change-Id: Ifa7b81523237ebda941cab2feed42c176846b618
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3976028
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Auto-Submit: Michael Achenbach <machenbach@chromium.org>
Owners-Override: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83888}
Michael Achenbach, 2022-10-24 17:06:34 +00:00, committed by V8 LUCI CQ
parent 66ba7b54eb
commit 4b28d53011
45 changed files with 675 additions and 3048 deletions
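
The quoted description above hinges on two ideas: machine operators may now
appear before simplified lowering, bracketed by EnterMachineGraph and
ExitMachineGraph, and each Branch operator carries a BranchSemantics
parameter so that phases interpret its condition by annotation rather than
by pipeline position. A minimal standalone sketch of that second idea (not
V8 code; all names below are illustrative):

#include <cassert>
#include <cstdint>
#include <variant>

enum class BranchSemantics { kJS, kMachine };

struct MachineWord { uint32_t bits; };  // machine-level condition value
struct JSBoolean { bool value; };       // stands in for a JS boolean value
using Condition = std::variant<MachineWord, JSBoolean>;

// A machine branch treats any non-zero word as taken; a JS branch expects
// a JS boolean. A verifier can check this per operator instead of guessing
// from the current phase.
bool BranchTaken(BranchSemantics semantics, const Condition& cond) {
  if (semantics == BranchSemantics::kMachine) {
    return std::get<MachineWord>(cond).bits != 0;
  }
  return std::get<JSBoolean>(cond).value;
}

int main() {
  assert(BranchTaken(BranchSemantics::kMachine, MachineWord{1}));
  assert(!BranchTaken(BranchSemantics::kJS, JSBoolean{false}));
}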

src/base/container-utils.h

@@ -7,7 +7,6 @@
#include <algorithm>
#include <optional>
#include <vector>
namespace v8::base {
@@ -78,18 +77,6 @@ inline size_t count_if(const C& container, const P& predicate) {
return std::count_if(begin(container), end(container), predicate);
}
// Helper for std::all_of.
template <typename C, typename P>
inline bool all_of(const C& container, const P& predicate) {
return std::all_of(begin(container), end(container), predicate);
}
// Helper for std::none_of.
template <typename C, typename P>
inline bool none_of(const C& container, const P& predicate) {
return std::none_of(begin(container), end(container), predicate);
}
// Returns true iff all elements of {container} compare equal using operator==.
template <typename C>
inline bool all_equal(const C& container) {
@@ -100,21 +87,6 @@ inline bool all_equal(const C& container) {
[&](const auto& v) { return v == value; });
}
// Returns true iff all elements of {container} compare equal to {value} using
// operator==.
template <typename C, typename T>
inline bool all_equal(const C& container, const T& value) {
return std::all_of(begin(container), end(container),
[&](const auto& v) { return v == value; });
}
// Appends to vector {v} all the elements in the range {begin(container)} and
// {end(container)}.
template <typename T, typename A, typename C>
inline void vector_append(std::vector<T, A>& v, const C& container) {
v.insert(end(v), begin(container), end(container));
}
} // namespace v8::base
#endif // V8_BASE_CONTAINER_UTILS_H_
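
For reference, the helpers deleted from this header are thin wrappers over
the STL algorithms. A self-contained usage sketch, with the definitions
copied from the hunk above:

#include <algorithm>
#include <cassert>
#include <vector>

namespace v8::base {
// Definitions as deleted above.
template <typename C, typename P>
inline bool all_of(const C& container, const P& predicate) {
  return std::all_of(begin(container), end(container), predicate);
}
template <typename C, typename P>
inline bool none_of(const C& container, const P& predicate) {
  return std::none_of(begin(container), end(container), predicate);
}
template <typename T, typename A, typename C>
inline void vector_append(std::vector<T, A>& v, const C& container) {
  v.insert(end(v), begin(container), end(container));
}
}  // namespace v8::base

int main() {
  std::vector<int> v{2, 4, 6};
  assert(v8::base::all_of(v, [](int x) { return x % 2 == 0; }));
  assert(v8::base::none_of(v, [](int x) { return x < 0; }));
  std::vector<int> out;
  v8::base::vector_append(out, v);  // appends {2, 4, 6}
  assert(out == v);
}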

src/codegen/external-reference.cc

@@ -25,7 +25,6 @@
#include "src/logging/log.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/numbers/math-random.h"
#include "src/objects/elements-kind.h"
#include "src/objects/elements.h"
#include "src/objects/object-type.h"
#include "src/objects/objects-inl.h"
@@ -951,20 +950,6 @@ ExternalReference ExternalReference::search_string_raw_two_two() {
return search_string_raw<const base::uc16, const base::uc16>();
}
ExternalReference
ExternalReference::typed_array_and_rab_gsab_typed_array_elements_kind_shifts() {
uint8_t* ptr =
const_cast<uint8_t*>(TypedArrayAndRabGsabTypedArrayElementsKindShifts());
return ExternalReference(reinterpret_cast<Address>(ptr));
}
ExternalReference
ExternalReference::typed_array_and_rab_gsab_typed_array_elements_kind_sizes() {
uint8_t* ptr =
const_cast<uint8_t*>(TypedArrayAndRabGsabTypedArrayElementsKindSizes());
return ExternalReference(reinterpret_cast<Address>(ptr));
}
namespace {
void StringWriteToFlatOneByte(Address source, uint8_t* sink, int32_t start,
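
The two entries removed above expose flat uint8_t tables holding the
element shift and size for every (RAB/GSAB) TypedArray ElementsKind,
indexed by kind minus the first fixed TypedArray kind (they are consumed by
LookupByteShiftForElementsKind/LookupByteSizeForElementsKind in
graph-assembler.cc below). A standalone model of that table lookup, using
illustrative enum values rather than V8's real ones:

#include <cassert>
#include <cstdint>

// Illustrative subset of TypedArray elements kinds (values are not V8's).
enum ElementsKind : uint8_t {
  FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = 0,
  UINT8_ELEMENTS = 0,
  UINT16_ELEMENTS,
  UINT32_ELEMENTS,
  FLOAT64_ELEMENTS,
};

// Flat tables indexed by (kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND).
constexpr uint8_t kShifts[] = {0, 1, 2, 3};
constexpr uint8_t kSizes[] = {1, 2, 4, 8};

uint8_t ShiftFor(ElementsKind kind) {
  return kShifts[kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND];
}
uint8_t SizeFor(ElementsKind kind) {
  return kSizes[kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND];
}

int main() {
  assert(SizeFor(FLOAT64_ELEMENTS) == 1u << ShiftFor(FLOAT64_ELEMENTS));
}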

src/codegen/external-reference.h

@@ -338,10 +338,6 @@ class StatsCounter;
V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs") \
V(re_experimental_match_for_call_from_js, \
"ExperimentalRegExp::MatchForCallFromJs") \
V(typed_array_and_rab_gsab_typed_array_elements_kind_shifts, \
"TypedArrayAndRabGsabTypedArrayElementsKindShifts") \
V(typed_array_and_rab_gsab_typed_array_elements_kind_sizes, \
"TypedArrayAndRabGsabTypedArrayElementsKindSizes") \
EXTERNAL_REFERENCE_LIST_INTL(V) \
EXTERNAL_REFERENCE_LIST_SANDBOX(V)
#ifdef V8_INTL_SUPPORT

src/codegen/tnode.h

@@ -359,10 +359,10 @@ class TNode {
public:
template <class U,
typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
TNode(const TNode<U>& other) V8_NOEXCEPT : node_(other) {
TNode(const TNode<U>& other) : node_(other) {
LazyTemplateChecks();
}
TNode(const TNode& other) V8_NOEXCEPT : node_(other) { LazyTemplateChecks(); }
TNode(const TNode& other) : node_(other) { LazyTemplateChecks(); }
TNode() : TNode(nullptr) {}
TNode operator=(TNode other) {
@@ -375,7 +375,7 @@ class TNode {
static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
protected:
private:
explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
// These checks shouldn't be checked before TNode is actually used.
void LazyTemplateChecks() {
@@ -385,21 +385,6 @@ class TNode {
compiler::Node* node_;
};
// SloppyTNode<T> is a variant of TNode<T> and allows implicit casts from
// Node*. It is intended for function arguments as long as some call sites
// still use untyped Node* arguments.
// TODO(turbofan): Delete this class once transition is finished.
template <class T>
class SloppyTNode : public TNode<T> {
public:
SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit)
: TNode<T>(node) {}
template <class U, typename std::enable_if<is_subtype<U, T>::value,
int>::type = 0>
SloppyTNode(const TNode<U>& other) V8_NOEXCEPT // NOLINT(runtime/explicit)
: TNode<T>(other) {}
};
} // namespace internal
} // namespace v8

src/compiler/access-builder.cc

@@ -364,22 +364,6 @@ FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferByteLength() {
FieldAccess access = {kTaggedBase,
JSArrayBuffer::kRawByteLengthOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kJSArrayBufferByteLengthType,
MachineType::UintPtr(),
kNoWriteBarrier,
"JSArrayBufferByteLength"};
#ifdef V8_ENABLE_SANDBOX
access.is_bounded_size_access = true;
#endif
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
@@ -421,19 +405,6 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBitField() {
FieldAccess access = {kTaggedBase,
JSArrayBufferView::kBitFieldOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kUint32,
MachineType::Uint32(),
kNoWriteBarrier,
"JSArrayBufferViewBitField"};
return access;
}
// static
FieldAccess AccessBuilder::ForJSTypedArrayLength() {
FieldAccess access = {kTaggedBase,

src/compiler/access-builder.h

@@ -134,9 +134,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSArrayBuffer::bit_field() field.
static FieldAccess ForJSArrayBufferBitField();
// Provides access to JSArrayBuffer::byteLength() field.
static FieldAccess ForJSArrayBufferByteLength();
// Provides access to JSArrayBufferView::buffer() field.
static FieldAccess ForJSArrayBufferViewBuffer();
@@ -146,9 +143,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSArrayBufferView::byteOffset() field.
static FieldAccess ForJSArrayBufferViewByteOffset();
// Provides access to JSArrayBufferView::bitfield() field
static FieldAccess ForJSArrayBufferViewBitField();
// Provides access to JSTypedArray::length() field.
static FieldAccess ForJSTypedArrayLength();

src/compiler/backend/instruction-selector.cc

@@ -12,7 +12,6 @@
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-properties.h"
@@ -1289,10 +1288,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kBranch: {
DCHECK_EQ(IrOpcode::kBranch, input->opcode());
// TODO(nicohartmann@): Once all branches have explicitly specified
// semantics, we should allow only BranchSemantics::kMachine here.
DCHECK_NE(BranchSemantics::kJS,
BranchParametersOf(input->op()).semantics());
BasicBlock* tbranch = block->SuccessorAt(0);
BasicBlock* fbranch = block->SuccessorAt(1);
if (tbranch == fbranch) {

src/compiler/branch-elimination.cc

@@ -5,10 +5,8 @@
#include "src/compiler/branch-elimination.h"
#include "src/base/small-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
namespace v8 {
namespace internal {
@@ -83,22 +81,11 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
// second_true second_false
//
auto SemanticsOf = [phase = this->phase_](Node* branch) {
DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
BranchSemantics semantics = BranchParametersOf(branch->op()).semantics();
if (semantics == BranchSemantics::kUnspecified) {
semantics =
(phase == kEARLY ? BranchSemantics::kJS : BranchSemantics::kMachine);
}
return semantics;
};
DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
Node* merge = NodeProperties::GetControlInput(branch);
if (merge->opcode() != IrOpcode::kMerge) return;
Node* condition = branch->InputAt(0);
BranchSemantics semantics = SemanticsOf(branch);
Graph* graph = jsgraph()->graph();
base::SmallVector<Node*, 2> phi_inputs;
@@ -110,14 +97,12 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
BranchCondition branch_condition = from_input.LookupState(condition);
if (!branch_condition.IsSet()) return;
if (SemanticsOf(branch_condition.branch) != semantics) return;
bool condition_value = branch_condition.is_true;
if (semantics == BranchSemantics::kJS) {
if (phase_ == kEARLY) {
phi_inputs.emplace_back(condition_value ? jsgraph()->TrueConstant()
: jsgraph()->FalseConstant());
} else {
DCHECK_EQ(semantics, BranchSemantics::kMachine);
phi_inputs.emplace_back(
condition_value
? graph->NewNode(jsgraph()->common()->Int32Constant(1))
@@ -125,12 +110,11 @@ void BranchElimination::SimplifyBranchCondition(Node* branch) {
}
}
phi_inputs.emplace_back(merge);
Node* new_phi =
graph->NewNode(common()->Phi(semantics == BranchSemantics::kJS
? MachineRepresentation::kTagged
: MachineRepresentation::kWord32,
input_count),
input_count + 1, &phi_inputs.at(0));
Node* new_phi = graph->NewNode(
common()->Phi(phase_ == kEARLY ? MachineRepresentation::kTagged
: MachineRepresentation::kWord32,
input_count),
input_count + 1, &phi_inputs.at(0));
// Replace the branch condition with the new phi.
NodeProperties::ReplaceValueInput(branch, new_phi, 0);

src/compiler/branch-elimination.h

@@ -43,8 +43,6 @@ class V8_EXPORT_PRIVATE BranchElimination final
: public NON_EXPORTED_BASE(AdvancedReducerWithControlPathState)<
BranchCondition, kUniqueInstance> {
public:
// TODO(nicohartmann@): Remove {Phase} once all Branch operators have
// specified semantics.
enum Phase {
kEARLY,
kLATE,

src/compiler/common-operator.cc

@@ -29,18 +29,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
namespace compiler {
std::ostream& operator<<(std::ostream& os, BranchSemantics semantics) {
switch (semantics) {
case BranchSemantics::kJS:
return os << "JS";
case BranchSemantics::kMachine:
return os << "Machine";
case BranchSemantics::kUnspecified:
return os << "Unspecified";
}
UNREACHABLE();
}
std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
switch (trap_id) {
#define TRAP_CASE(Name) \
@@ -60,33 +48,13 @@ TrapId TrapIdOf(const Operator* const op) {
return OpParameter<TrapId>(op);
}
bool operator==(const BranchParameters& lhs, const BranchParameters& rhs) {
return lhs.semantics() == rhs.semantics() && lhs.hint() == rhs.hint();
}
size_t hash_value(const BranchParameters& p) {
return base::hash_combine(p.semantics(), p.hint());
}
std::ostream& operator<<(std::ostream& os, const BranchParameters& p) {
return os << p.semantics() << ", " << p.hint();
}
const BranchParameters& BranchParametersOf(const Operator* const op) {
DCHECK_EQ(op->opcode(), IrOpcode::kBranch);
return OpParameter<BranchParameters>(op);
}
BranchHint BranchHintOf(const Operator* const op) {
switch (op->opcode()) {
case IrOpcode::kIfValue:
return IfValueParametersOf(op).hint();
case IrOpcode::kIfDefault:
return OpParameter<BranchHint>(op);
// TODO(nicohartmann@): Should remove all uses of BranchHintOf for branches
// and replace with BranchParametersOf.
case IrOpcode::kBranch:
return BranchParametersOf(op).hint();
return OpParameter<BranchHint>(op);
default:
UNREACHABLE();
}
@@ -466,27 +434,6 @@ const SLVerifierHintParameters& SLVerifierHintParametersOf(const Operator* op) {
return OpParameter<SLVerifierHintParameters>(op);
}
V8_EXPORT_PRIVATE bool operator==(const ExitMachineGraphParameters& lhs,
const ExitMachineGraphParameters& rhs) {
return lhs.output_representation() == rhs.output_representation() &&
lhs.output_type().Equals(rhs.output_type());
}
size_t hash_value(const ExitMachineGraphParameters& p) {
return base::hash_combine(p.output_representation(), p.output_type());
}
V8_EXPORT_PRIVATE std::ostream& operator<<(
std::ostream& os, const ExitMachineGraphParameters& p) {
return os << p.output_representation() << ", " << p.output_type();
}
const ExitMachineGraphParameters& ExitMachineGraphParametersOf(
const Operator* op) {
DCHECK_EQ(op->opcode(), IrOpcode::kExitMachineGraph);
return OpParameter<ExitMachineGraphParameters>(op);
}
#define COMMON_CACHED_OP_LIST(V) \
V(Plug, Operator::kNoProperties, 0, 0, 0, 1, 0, 0) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
@@ -506,15 +453,9 @@ const ExitMachineGraphParameters& ExitMachineGraphParametersOf(
#define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged)
#define CACHED_BRANCH_LIST(V) \
V(JS, None) \
V(JS, True) \
V(JS, False) \
V(Machine, None) \
V(Machine, True) \
V(Machine, False) \
V(Unspecified, None) \
V(Unspecified, True) \
V(Unspecified, False)
V(None) \
V(True) \
V(False)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -685,18 +626,17 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
template <BranchSemantics semantics, BranchHint hint>
struct BranchOperator final : public Operator1<BranchParameters> {
template <BranchHint hint>
struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
: Operator1<BranchParameters>( // --
: Operator1<BranchHint>( // --
IrOpcode::kBranch, Operator::kKontrol, // opcode
"Branch", // name
1, 0, 1, 0, 0, 2, // counts
{semantics, hint}) {} // parameter
hint) {} // parameter
};
#define CACHED_BRANCH(Semantics, Hint) \
BranchOperator<BranchSemantics::k##Semantics, BranchHint::k##Hint> \
kBranch##Semantics##Hint##Operator;
#define CACHED_BRANCH(Hint) \
BranchOperator<BranchHint::k##Hint> kBranch##Hint##Operator;
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -984,12 +924,10 @@ const Operator* CommonOperatorBuilder::SLVerifierHint(
0, 0, 1, 0, 0, SLVerifierHintParameters(semantics, override_output_type));
}
const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
BranchSemantics semantics) {
#define CACHED_BRANCH(Semantics, Hint) \
if (semantics == BranchSemantics::k##Semantics && \
hint == BranchHint::k##Hint) { \
return &cache_.kBranch##Semantics##Hint##Operator; \
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
#define CACHED_BRANCH(Hint) \
if (hint == BranchHint::k##Hint) { \
return &cache_.kBranch##Hint##Operator; \
}
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -1371,19 +1309,6 @@ const Operator* CommonOperatorBuilder::FoldConstant() {
2, 0, 0, 1, 0, 0); // counts
}
const Operator* CommonOperatorBuilder::EnterMachineGraph(UseInfo use_info) {
return zone()->New<Operator1<UseInfo>>(IrOpcode::kEnterMachineGraph,
Operator::kPure, "EnterMachineGraph",
1, 0, 0, 1, 0, 0, use_info);
}
const Operator* CommonOperatorBuilder::ExitMachineGraph(
MachineRepresentation output_representation, Type output_type) {
return zone()->New<Operator1<ExitMachineGraphParameters>>(
IrOpcode::kExitMachineGraph, Operator::kPure, "ExitMachineGraph", 1, 0, 0,
1, 0, 0, ExitMachineGraphParameters{output_representation, output_type});
}
const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
DCHECK_LT(0, effect_input_count); // Disallow empty effect phis.
switch (effect_input_count) {
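
The CACHED_BRANCH machinery above keeps one statically allocated Branch
operator per parameter combination, so Branch(hint, semantics) hands out a
shared instance instead of allocating in the zone. A standalone sketch of
that caching pattern (names are illustrative, not V8's):

#include <cassert>

enum class BranchSemantics { kJS, kMachine, kUnspecified };
enum class BranchHint { kNone, kTrue, kFalse };

struct Operator {
  BranchSemantics semantics;
  BranchHint hint;
};

// One function-local static per instantiation plays the role of the
// BranchOperator<semantics, hint> cache members above.
template <BranchSemantics S, BranchHint H>
const Operator* CachedBranch() {
  static const Operator op{S, H};
  return &op;
}

const Operator* Branch(BranchHint hint,
                       BranchSemantics s = BranchSemantics::kUnspecified) {
  // Stands in for the CACHED_BRANCH_LIST macro expansion; only a couple of
  // the nine combinations are spelled out here.
  if (s == BranchSemantics::kJS && hint == BranchHint::kNone)
    return CachedBranch<BranchSemantics::kJS, BranchHint::kNone>();
  if (s == BranchSemantics::kMachine && hint == BranchHint::kNone)
    return CachedBranch<BranchSemantics::kMachine, BranchHint::kNone>();
  return CachedBranch<BranchSemantics::kUnspecified, BranchHint::kNone>();
}

int main() {
  // Equal parameters yield the same cached instance.
  assert(Branch(BranchHint::kNone, BranchSemantics::kJS) ==
         Branch(BranchHint::kNone, BranchSemantics::kJS));
}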

src/compiler/common-operator.h

@@ -13,7 +13,6 @@
#include "src/compiler/frame-states.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/use-info.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/zone/zone-containers.h"
@@ -36,11 +35,7 @@ class Node;
// (machine branch semantics). Some passes are applied both before and after
// SimplifiedLowering, and use the BranchSemantics enum to know how branches
// should be treated.
// TODO(nicohartmann@): Need to remove BranchSemantics::kUnspecified once all
// branch uses have been updated.
enum class BranchSemantics { kJS, kMachine, kUnspecified };
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchSemantics);
enum class BranchSemantics { kJS, kMachine };
inline BranchHint NegateBranchHint(BranchHint hint) {
switch (hint) {
@@ -67,32 +62,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id);
TrapId TrapIdOf(const Operator* const op);
class BranchParameters final {
public:
BranchParameters(BranchSemantics semantics, BranchHint hint)
: semantics_(semantics), hint_(hint) {}
BranchSemantics semantics() const { return semantics_; }
BranchHint hint() const { return hint_; }
private:
const BranchSemantics semantics_;
const BranchHint hint_;
};
bool operator==(const BranchParameters& lhs, const BranchParameters& rhs);
inline bool operator!=(const BranchParameters& lhs,
const BranchParameters& rhs) {
return !(lhs == rhs);
}
size_t hash_value(const BranchParameters& p);
std::ostream& operator<<(std::ostream&, const BranchParameters& p);
V8_EXPORT_PRIVATE const BranchParameters& BranchParametersOf(
const Operator* const) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
V8_WARN_UNUSED_RESULT;
@@ -470,35 +439,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& out,
V8_EXPORT_PRIVATE const SLVerifierHintParameters& SLVerifierHintParametersOf(
const Operator* op) V8_WARN_UNUSED_RESULT;
class ExitMachineGraphParameters final {
public:
ExitMachineGraphParameters(MachineRepresentation output_representation,
Type output_type)
: output_representation_(output_representation),
output_type_(output_type) {}
MachineRepresentation output_representation() const {
return output_representation_;
}
const Type& output_type() const { return output_type_; }
private:
const MachineRepresentation output_representation_;
const Type output_type_;
};
V8_EXPORT_PRIVATE bool operator==(const ExitMachineGraphParameters& lhs,
const ExitMachineGraphParameters& rhs);
size_t hash_value(const ExitMachineGraphParameters& p);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const ExitMachineGraphParameters& p);
V8_EXPORT_PRIVATE const ExitMachineGraphParameters&
ExitMachineGraphParametersOf(const Operator* op) V8_WARN_UNUSED_RESULT;
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -524,11 +464,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* semantics,
const base::Optional<Type>& override_output_type);
const Operator* End(size_t control_input_count);
// TODO(nicohartmann@): Remove the default argument for {semantics} once all
// uses are updated.
const Operator* Branch(
BranchHint = BranchHint::kNone,
BranchSemantics semantics = BranchSemantics::kUnspecified);
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -601,9 +537,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Retain();
const Operator* TypeGuard(Type type);
const Operator* FoldConstant();
const Operator* EnterMachineGraph(UseInfo use_info);
const Operator* ExitMachineGraph(MachineRepresentation output_representation,
Type output_type);
// Constructs a new merge or phi operator with the same opcode as {op}, but
// with {size} inputs.

src/compiler/diamond.h

@@ -23,12 +23,10 @@ struct Diamond {
Node* merge;
Diamond(Graph* g, CommonOperatorBuilder* b, Node* cond,
BranchHint hint = BranchHint::kNone,
BranchSemantics semantics = BranchSemantics::kUnspecified) {
BranchHint hint = BranchHint::kNone) {
graph = g;
common = b;
branch =
graph->NewNode(common->Branch(hint, semantics), cond, graph->start());
branch = graph->NewNode(common->Branch(hint), cond, graph->start());
if_true = graph->NewNode(common->IfTrue(), branch);
if_false = graph->NewNode(common->IfFalse(), branch);
merge = graph->NewNode(common->Merge(2), if_true, if_false);

src/compiler/effect-control-linearizer.cc

@@ -5111,14 +5111,14 @@ Node* EffectControlLinearizer::AdaptFastCallTypedArrayArgument(
Node* buffer_is_not_detached = __ Word32Equal(
__ Word32And(buffer_bit_field,
__ Int32Constant(JSArrayBuffer::WasDetachedBit::kMask)),
__ Int32Constant(0));
__ ZeroConstant());
__ GotoIfNot(buffer_is_not_detached, bailout);
// Go to the slow path if the {buffer} is shared.
Node* buffer_is_not_shared = __ Word32Equal(
__ Word32And(buffer_bit_field,
__ Int32Constant(JSArrayBuffer::IsSharedBit::kMask)),
__ Int32Constant(0));
__ ZeroConstant());
__ GotoIfNot(buffer_is_not_shared, bailout);
// Unpack the store and length, and store them to a struct
@@ -6939,8 +6939,7 @@ void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler_(graph, temp_zone,
BranchSemantics::kMachine);
JSGraphAssembler graph_assembler_(graph, temp_zone);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
MaintainSchedule::kDiscard, broker);

src/compiler/graph-assembler.cc

@@ -4,21 +4,12 @@
#include "src/compiler/graph-assembler.h"
#include "src/base/container-utils.h"
#include "src/codegen/callable.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/linkage.h"
#include "src/compiler/type-cache.h"
// For TNode types.
#include "src/objects/elements-kind.h"
#include "src/objects/heap-number.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/oddball.h"
#include "src/objects/string.h"
@@ -42,21 +33,18 @@ class V8_NODISCARD GraphAssembler::BlockInlineReduction {
};
GraphAssembler::GraphAssembler(
MachineGraph* mcgraph, Zone* zone, BranchSemantics default_branch_semantics,
MachineGraph* mcgraph, Zone* zone,
base::Optional<NodeChangedCallback> node_changed_callback,
bool mark_loop_exits)
: temp_zone_(zone),
mcgraph_(mcgraph),
default_branch_semantics_(default_branch_semantics),
effect_(nullptr),
control_(nullptr),
node_changed_callback_(node_changed_callback),
inline_reducers_(zone),
inline_reductions_blocked_(false),
loop_headers_(zone),
mark_loop_exits_(mark_loop_exits) {
DCHECK_NE(default_branch_semantics_, BranchSemantics::kUnspecified);
}
mark_loop_exits_(mark_loop_exits) {}
GraphAssembler::~GraphAssembler() { DCHECK_EQ(loop_nesting_level_, 0); }
@@ -64,16 +52,16 @@ Node* GraphAssembler::IntPtrConstant(intptr_t value) {
return AddClonedNode(mcgraph()->IntPtrConstant(value));
}
TNode<UintPtrT> GraphAssembler::UintPtrConstant(uintptr_t value) {
return TNode<UintPtrT>::UncheckedCast(mcgraph()->UintPtrConstant(value));
Node* GraphAssembler::UintPtrConstant(uintptr_t value) {
return AddClonedNode(mcgraph()->UintPtrConstant(value));
}
Node* GraphAssembler::Int32Constant(int32_t value) {
return AddClonedNode(mcgraph()->Int32Constant(value));
}
TNode<Uint32T> GraphAssembler::Uint32Constant(uint32_t value) {
return TNode<Uint32T>::UncheckedCast(mcgraph()->Uint32Constant(value));
Node* GraphAssembler::Uint32Constant(uint32_t value) {
return AddClonedNode(mcgraph()->Uint32Constant(value));
}
Node* GraphAssembler::Int64Constant(int64_t value) {
@@ -162,43 +150,8 @@ PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DEF)
Node* GraphAssembler::Name(Node* left, Node* right) { \
return AddNode(graph()->NewNode(machine()->Name(), left, right)); \
}
#define PURE_BINOP_DEF_TNODE(Name, Result, Left, Right) \
TNode<Result> GraphAssembler::Name(SloppyTNode<Left> left, \
SloppyTNode<Right> right) { \
return AddNode<Result>(graph()->NewNode(machine()->Name(), left, right)); \
}
PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF, PURE_BINOP_DEF_TNODE)
PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF)
#undef PURE_BINOP_DEF
#undef PURE_BINOP_DEF_TNODE
TNode<BoolT> GraphAssembler::UintPtrLessThanOrEqual(TNode<UintPtrT> left,
TNode<UintPtrT> right) {
return kSystemPointerSize == 8
? Uint64LessThanOrEqual(TNode<Uint64T>::UncheckedCast(left),
TNode<Uint64T>::UncheckedCast(right))
: Uint32LessThanOrEqual(TNode<Uint32T>::UncheckedCast(left),
TNode<Uint32T>::UncheckedCast(right));
}
TNode<UintPtrT> GraphAssembler::UintPtrAdd(TNode<UintPtrT> left,
TNode<UintPtrT> right) {
return kSystemPointerSize == 8
? TNode<UintPtrT>::UncheckedCast(Int64Add(left, right))
: TNode<UintPtrT>::UncheckedCast(Int32Add(left, right));
}
TNode<UintPtrT> GraphAssembler::UintPtrSub(TNode<UintPtrT> left,
TNode<UintPtrT> right) {
return kSystemPointerSize == 8
? TNode<UintPtrT>::UncheckedCast(Int64Sub(left, right))
: TNode<UintPtrT>::UncheckedCast(Int32Sub(left, right));
}
TNode<UintPtrT> GraphAssembler::UintPtrDiv(TNode<UintPtrT> left,
TNode<UintPtrT> right) {
return kSystemPointerSize == 8
? TNode<UintPtrT>::UncheckedCast(Uint64Div(left, right))
: TNode<UintPtrT>::UncheckedCast(Uint32Div(left, right));
}
#define CHECKED_BINOP_DEF(Name) \
Node* GraphAssembler::Name(Node* left, Node* right) { \
@@ -273,15 +226,6 @@ Node* JSGraphAssembler::LoadField(FieldAccess const& access, Node* object) {
return value;
}
TNode<Uint32T> JSGraphAssembler::LoadElementsKind(TNode<Map> map) {
TNode<Uint8T> bit_field2 = EnterMachineGraph<Uint8T>(
LoadField<Uint8T>(AccessBuilder::ForMapBitField2(), map),
UseInfo::TruncatingWord32());
return TNode<Uint32T>::UncheckedCast(
Word32Shr(TNode<Word32T>::UncheckedCast(bit_field2),
Uint32Constant(Map::Bits2::ElementsKindBits::kShift)));
}
Node* JSGraphAssembler::LoadElement(ElementAccess const& access, Node* object,
Node* index) {
Node* value = AddNode(graph()->NewNode(simplified()->LoadElement(access),
@@ -467,363 +411,6 @@ Node* JSGraphAssembler::StringCharCodeAt(TNode<String> string,
position, effect(), control()));
}
class ArrayBufferViewAccessBuilder {
public:
explicit ArrayBufferViewAccessBuilder(JSGraphAssembler* assembler,
std::set<ElementsKind> candidates)
: assembler_(assembler), candidates_(std::move(candidates)) {
DCHECK_NOT_NULL(assembler_);
}
bool maybe_rab_gsab() const {
if (candidates_.empty()) return true;
return !base::all_of(candidates_, [](auto e) {
return !IsRabGsabTypedArrayElementsKind(e);
});
}
base::Optional<int> TryComputeStaticElementShift() {
if (candidates_.empty()) return base::nullopt;
int shift = ElementsKindToShiftSize(*candidates_.begin());
if (!base::all_of(candidates_, [shift](auto e) {
return ElementsKindToShiftSize(e) == shift;
})) {
return base::nullopt;
}
return shift;
}
base::Optional<int> TryComputeStaticElementSize() {
if (candidates_.empty()) return base::nullopt;
int size = ElementsKindToByteSize(*candidates_.begin());
if (!base::all_of(candidates_, [size](auto e) {
return ElementsKindToByteSize(e) == size;
})) {
return base::nullopt;
}
return size;
}
TNode<UintPtrT> BuildLength(TNode<JSArrayBufferView> view,
TNode<Context> context) {
auto& a = *assembler_;
// Case 1: Normal (backed by AB/SAB) or non-length tracking backed by GSAB
// (can't go oob once constructed)
auto GsabFixedOrNormal = [&]() {
return MachineLoadField<UintPtrT>(AccessBuilder::ForJSTypedArrayLength(),
view, UseInfo::Word());
};
// If we statically know we cannot have rab/gsab backed, we can simply
// load from the view.
if (!maybe_rab_gsab()) {
return GsabFixedOrNormal();
}
// Otherwise, we need to generate the checks for the view's bitfield.
TNode<Word32T> bitfield = a.EnterMachineGraph<Word32T>(
a.LoadField<Word32T>(AccessBuilder::ForJSArrayBufferViewBitField(),
view),
UseInfo::TruncatingWord32());
TNode<Word32T> length_tracking_bit = a.Word32And(
bitfield, a.Uint32Constant(JSArrayBufferView::kIsLengthTracking));
TNode<Word32T> backed_by_rab_bit = a.Word32And(
bitfield, a.Uint32Constant(JSArrayBufferView::kIsBackedByRab));
// Load the underlying buffer.
TNode<HeapObject> buffer = a.LoadField<HeapObject>(
AccessBuilder::ForJSArrayBufferViewBuffer(), view);
// Compute the element size.
TNode<Uint32T> element_size;
if (auto size_opt = TryComputeStaticElementSize()) {
element_size = a.Uint32Constant(*size_opt);
} else {
TNode<Map> typed_array_map = a.LoadField<Map>(
AccessBuilder::ForMap(WriteBarrierKind::kNoWriteBarrier), view);
TNode<Uint32T> elements_kind = a.LoadElementsKind(typed_array_map);
element_size = a.LookupByteSizeForElementsKind(elements_kind);
}
// 2) Fixed length backed by RAB (can go oob once constructed)
auto RabFixed = [&]() {
TNode<UintPtrT> unchecked_byte_length = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteLength(), view,
UseInfo::Word());
TNode<UintPtrT> underlying_byte_length = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferByteLength(), buffer, UseInfo::Word());
TNode<UintPtrT> byte_offset = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteOffset(), view,
UseInfo::Word());
TNode<UintPtrT> byte_length =
a
.MachineSelectIf<UintPtrT>(a.UintPtrLessThanOrEqual(
a.UintPtrAdd(byte_offset, unchecked_byte_length),
underlying_byte_length))
.Then([&]() { return unchecked_byte_length; })
.Else([&]() { return a.UintPtrConstant(0); })
.Value();
return a.UintPtrDiv(byte_length,
TNode<UintPtrT>::UncheckedCast(element_size));
};
// 3) Length-tracking backed by RAB (JSArrayBuffer stores the length)
auto RabTracking = [&]() {
TNode<UintPtrT> byte_length = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferByteLength(), buffer, UseInfo::Word());
TNode<UintPtrT> byte_offset = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteOffset(), view,
UseInfo::Word());
return a
.MachineSelectIf<UintPtrT>(
a.UintPtrLessThanOrEqual(byte_offset, byte_length))
.Then([&]() {
// length = floor((byte_length - byte_offset) / element_size)
return a.UintPtrDiv(a.UintPtrSub(byte_length, byte_offset),
TNode<UintPtrT>::UncheckedCast(element_size));
})
.Else([&]() { return a.UintPtrConstant(0); })
.ExpectTrue()
.Value();
};
// 4) Length-tracking backed by GSAB (BackingStore stores the length)
auto GsabTracking = [&]() {
TNode<Number> temp = TNode<Number>::UncheckedCast(a.TypeGuard(
TypeCache::Get()->kJSArrayBufferViewByteLengthType,
a.JSCallRuntime1(Runtime::kGrowableSharedArrayBufferByteLength,
buffer, context, base::nullopt,
Operator::kNoWrite)));
TNode<UintPtrT> byte_length =
a.EnterMachineGraph<UintPtrT>(temp, UseInfo::Word());
TNode<UintPtrT> byte_offset = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteOffset(), view,
UseInfo::Word());
return a.UintPtrDiv(a.UintPtrSub(byte_length, byte_offset),
TNode<UintPtrT>::UncheckedCast(element_size));
};
return a.MachineSelectIf<UintPtrT>(length_tracking_bit)
.Then([&]() {
return a.MachineSelectIf<UintPtrT>(backed_by_rab_bit)
.Then(RabTracking)
.Else(GsabTracking)
.Value();
})
.Else([&]() {
return a.MachineSelectIf<UintPtrT>(backed_by_rab_bit)
.Then(RabFixed)
.Else(GsabFixedOrNormal)
.Value();
})
.Value();
}
TNode<UintPtrT> BuildByteLength(TNode<JSArrayBufferView> view,
TNode<Context> context) {
auto& a = *assembler_;
// Case 1: Normal (backed by AB/SAB) or non-length tracking backed by GSAB
// (can't go oob once constructed)
auto GsabFixedOrNormal = [&]() {
return MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteLength(), view,
UseInfo::Word());
};
// If we statically know we cannot have rab/gsab backed, we can simply
// load from the view.
if (!maybe_rab_gsab()) {
return GsabFixedOrNormal();
}
// Otherwise, we need to generate the checks for the view's bitfield.
TNode<Word32T> bitfield = a.EnterMachineGraph<Word32T>(
a.LoadField<Word32T>(AccessBuilder::ForJSArrayBufferViewBitField(),
view),
UseInfo::TruncatingWord32());
TNode<Word32T> length_tracking_bit = a.Word32And(
bitfield, a.Uint32Constant(JSArrayBufferView::kIsLengthTracking));
TNode<Word32T> backed_by_rab_bit = a.Word32And(
bitfield, a.Uint32Constant(JSArrayBufferView::kIsBackedByRab));
// Load the underlying buffer.
TNode<HeapObject> buffer = a.LoadField<HeapObject>(
AccessBuilder::ForJSArrayBufferViewBuffer(), view);
// Case 2: Fixed length backed by RAB (can go oob once constructed)
auto RabFixed = [&]() {
TNode<UintPtrT> unchecked_byte_length = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteLength(), view,
UseInfo::Word());
TNode<UintPtrT> underlying_byte_length = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferByteLength(), buffer, UseInfo::Word());
TNode<UintPtrT> byte_offset = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteOffset(), view,
UseInfo::Word());
return a
.MachineSelectIf<UintPtrT>(a.UintPtrLessThanOrEqual(
a.UintPtrAdd(byte_offset, unchecked_byte_length),
underlying_byte_length))
.Then([&]() { return unchecked_byte_length; })
.Else([&]() { return a.UintPtrConstant(0); })
.Value();
};
auto RoundDownToElementSize = [&](TNode<UintPtrT> byte_size) {
if (auto shift_opt = TryComputeStaticElementShift()) {
constexpr uintptr_t all_bits = static_cast<uintptr_t>(-1);
if (*shift_opt == 0) return byte_size;
return TNode<UintPtrT>::UncheckedCast(
a.WordAnd(byte_size, a.UintPtrConstant(all_bits << (*shift_opt))));
}
TNode<Map> typed_array_map = a.LoadField<Map>(
AccessBuilder::ForMap(WriteBarrierKind::kNoWriteBarrier), view);
TNode<Uint32T> elements_kind = a.LoadElementsKind(typed_array_map);
TNode<Uint32T> element_shift =
a.LookupByteShiftForElementsKind(elements_kind);
return TNode<UintPtrT>::UncheckedCast(
a.WordShl(a.WordShr(byte_size, element_shift), element_shift));
};
// Case 3: Length-tracking backed by RAB (JSArrayBuffer stores the length)
auto RabTracking = [&]() {
TNode<UintPtrT> byte_length = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferByteLength(), buffer, UseInfo::Word());
TNode<UintPtrT> byte_offset = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteOffset(), view,
UseInfo::Word());
return a
.MachineSelectIf<UintPtrT>(
a.UintPtrLessThanOrEqual(byte_offset, byte_length))
.Then([&]() {
return RoundDownToElementSize(
a.UintPtrSub(byte_length, byte_offset));
})
.Else([&]() { return a.UintPtrConstant(0); })
.ExpectTrue()
.Value();
};
// Case 4: Length-tracking backed by GSAB (BackingStore stores the length)
auto GsabTracking = [&]() {
TNode<Number> temp = TNode<Number>::UncheckedCast(a.TypeGuard(
TypeCache::Get()->kJSArrayBufferViewByteLengthType,
a.JSCallRuntime1(Runtime::kGrowableSharedArrayBufferByteLength,
buffer, context, base::nullopt,
Operator::kNoWrite)));
TNode<UintPtrT> byte_length =
a.EnterMachineGraph<UintPtrT>(temp, UseInfo::Word());
TNode<UintPtrT> byte_offset = MachineLoadField<UintPtrT>(
AccessBuilder::ForJSArrayBufferViewByteOffset(), view,
UseInfo::Word());
return RoundDownToElementSize(a.UintPtrSub(byte_length, byte_offset));
};
return a.MachineSelectIf<UintPtrT>(length_tracking_bit)
.Then([&]() {
return a.MachineSelectIf<UintPtrT>(backed_by_rab_bit)
.Then(RabTracking)
.Else(GsabTracking)
.Value();
})
.Else([&]() {
return a.MachineSelectIf<UintPtrT>(backed_by_rab_bit)
.Then(RabFixed)
.Else(GsabFixedOrNormal)
.Value();
})
.Value();
}
private:
template <typename T>
TNode<T> MachineLoadField(FieldAccess const& access, TNode<HeapObject> object,
const UseInfo& use_info) {
return assembler_->EnterMachineGraph<T>(
assembler_->LoadField<T>(access, object), use_info);
}
JSGraphAssembler* assembler_;
std::set<ElementsKind> candidates_;
};
TNode<Number> JSGraphAssembler::ArrayBufferViewByteLength(
TNode<JSArrayBufferView> array_buffer_view,
std::set<ElementsKind> elements_kinds_candidates, TNode<Context> context) {
ArrayBufferViewAccessBuilder builder(this,
std::move(elements_kinds_candidates));
return ExitMachineGraph<Number>(
builder.BuildByteLength(array_buffer_view, context),
MachineType::PointerRepresentation(),
TypeCache::Get()->kJSArrayBufferByteLengthType);
}
TNode<Number> JSGraphAssembler::TypedArrayLength(
TNode<JSTypedArray> typed_array,
std::set<ElementsKind> elements_kinds_candidates, TNode<Context> context) {
ArrayBufferViewAccessBuilder builder(this, elements_kinds_candidates);
return ExitMachineGraph<Number>(builder.BuildLength(typed_array, context),
MachineType::PointerRepresentation(),
TypeCache::Get()->kJSTypedArrayLengthType);
}
TNode<Uint32T> JSGraphAssembler::LookupByteShiftForElementsKind(
TNode<Uint32T> elements_kind) {
TNode<Uint32T> index = TNode<Uint32T>::UncheckedCast(Int32Sub(
elements_kind, Uint32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)));
TNode<RawPtrT> shift_table = TNode<RawPtrT>::UncheckedCast(ExternalConstant(
ExternalReference::
typed_array_and_rab_gsab_typed_array_elements_kind_shifts()));
return TNode<Uint8T>::UncheckedCast(
Load(MachineType::Uint8(), shift_table, index));
}
TNode<Uint32T> JSGraphAssembler::LookupByteSizeForElementsKind(
TNode<Uint32T> elements_kind) {
TNode<Uint32T> index = TNode<Uint32T>::UncheckedCast(Int32Sub(
elements_kind, Uint32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)));
TNode<RawPtrT> size_table = TNode<RawPtrT>::UncheckedCast(ExternalConstant(
ExternalReference::
typed_array_and_rab_gsab_typed_array_elements_kind_sizes()));
return TNode<Uint8T>::UncheckedCast(
Load(MachineType::Uint8(), size_table, index));
}
TNode<Object> JSGraphAssembler::JSCallRuntime1(
Runtime::FunctionId function_id, TNode<Object> arg0, TNode<Context> context,
base::Optional<FrameState> frame_state, Operator::Properties properties) {
return MayThrow([&]() {
if (frame_state.has_value()) {
return AddNode<Object>(graph()->NewNode(
javascript()->CallRuntime(function_id, 1, properties), arg0, context,
static_cast<Node*>(*frame_state), effect(), control()));
} else {
return AddNode<Object>(graph()->NewNode(
javascript()->CallRuntime(function_id, 1, properties), arg0, context,
effect(), control()));
}
});
}
TNode<Object> JSGraphAssembler::JSCallRuntime2(Runtime::FunctionId function_id,
TNode<Object> arg0,
TNode<Object> arg1,
TNode<Context> context,
FrameState frame_state) {
return MayThrow([&]() {
return AddNode<Object>(
graph()->NewNode(javascript()->CallRuntime(function_id, 2), arg0, arg1,
context, frame_state, effect(), control()));
});
}
Node* GraphAssembler::TypeGuard(Type type, Node* value) {
return AddNode(
graph()->NewNode(common()->TypeGuard(type), value, effect(), control()));
@@ -1003,7 +590,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
BranchImpl(default_branch_semantics_, condition, if_true, if_false, hint);
BranchImpl(condition, if_true, if_false, hint);
}
void GraphAssembler::ConnectUnreachableToEnd() {
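
Once the bitfield checks in BuildLength above have selected a case, the
length computation is plain pointer arithmetic. A standalone model of the
two RAB cases, following the arithmetic in the hunk above (hypothetical
helper names; V8 emits this as graph nodes, not C++):

#include <cassert>
#include <cstdint>

// Case 2 above: fixed-length view on a resizable buffer. The view reads as
// length 0 ("out of bounds") if the buffer shrank below offset + length.
uintptr_t RabFixedLength(uintptr_t byte_offset, uintptr_t view_byte_length,
                         uintptr_t buffer_byte_length,
                         uintptr_t element_size) {
  uintptr_t byte_length =
      (byte_offset + view_byte_length <= buffer_byte_length)
          ? view_byte_length
          : 0;
  return byte_length / element_size;
}

// Case 3 above: length-tracking view on a resizable buffer; the length
// follows the buffer's current byte length.
uintptr_t RabTrackingLength(uintptr_t byte_offset,
                            uintptr_t buffer_byte_length,
                            uintptr_t element_size) {
  if (byte_offset > buffer_byte_length) return 0;
  return (buffer_byte_length - byte_offset) / element_size;
}

int main() {
  // A Float64Array of 4 elements at offset 8 in a 48-byte resizable buffer.
  assert(RabFixedLength(8, 32, 48, 8) == 4);
  // The buffer shrank to 16 bytes: the fixed-length view went out of bounds.
  assert(RabFixedLength(8, 32, 16, 8) == 0);
  // A length-tracking view at offset 8 sees floor((48 - 8) / 8) = 5.
  assert(RabTrackingLength(8, 48, 8) == 5);
}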

src/compiler/graph-assembler.h

@@ -5,17 +5,14 @@
#ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_
#define V8_COMPILER_GRAPH_ASSEMBLER_H_
#include <optional>
#include <type_traits>
#include "src/base/small-vector.h"
#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects/oddball.h"
namespace v8 {
namespace internal {
@@ -65,57 +62,56 @@ class Reducer;
V(Word32ReverseBytes) \
V(Word64ReverseBytes)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V, T) \
V(Float64Add) \
V(Float64Div) \
V(Float64Equal) \
V(Float64InsertHighWord32) \
V(Float64InsertLowWord32) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
V(Float64Max) \
V(Float64Min) \
V(Float64Mod) \
V(Float64Sub) \
V(Int32Add) \
V(Int32LessThan) \
V(Int32LessThanOrEqual) \
V(Int32Mul) \
V(Int32Sub) \
V(Int64Add) \
V(Int64Sub) \
V(IntAdd) \
V(IntLessThan) \
V(IntMul) \
V(IntSub) \
V(Uint32LessThan) \
T(Uint32LessThanOrEqual, BoolT, Uint32T, Uint32T) \
V(Uint64LessThan) \
T(Uint64LessThanOrEqual, BoolT, Uint64T, Uint64T) \
V(UintLessThan) \
T(Word32And, Word32T, Word32T, Word32T) \
T(Word32Equal, BoolT, Word32T, Word32T) \
V(Word32Or) \
V(Word32Sar) \
V(Word32SarShiftOutZeros) \
V(Word32Shl) \
T(Word32Shr, Word32T, Word32T, Word32T) \
V(Word32Xor) \
V(Word64And) \
V(Word64Equal) \
V(Word64Or) \
V(Word64Sar) \
V(Word64SarShiftOutZeros) \
V(Word64Shl) \
V(Word64Shr) \
V(Word64Xor) \
V(WordAnd) \
V(WordEqual) \
V(WordOr) \
V(WordSar) \
V(WordSarShiftOutZeros) \
V(WordShl) \
V(WordShr) \
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(Float64Add) \
V(Float64Div) \
V(Float64Equal) \
V(Float64InsertHighWord32) \
V(Float64InsertLowWord32) \
V(Float64LessThan) \
V(Float64LessThanOrEqual) \
V(Float64Max) \
V(Float64Min) \
V(Float64Mod) \
V(Float64Sub) \
V(Int32Add) \
V(Int32LessThan) \
V(Int32LessThanOrEqual) \
V(Int32Mul) \
V(Int32Sub) \
V(Int64Sub) \
V(IntAdd) \
V(IntLessThan) \
V(IntMul) \
V(IntSub) \
V(Uint32LessThan) \
V(Uint32LessThanOrEqual) \
V(Uint64LessThan) \
V(Uint64LessThanOrEqual) \
V(UintLessThan) \
V(Word32And) \
V(Word32Equal) \
V(Word32Or) \
V(Word32Sar) \
V(Word32SarShiftOutZeros) \
V(Word32Shl) \
V(Word32Shr) \
V(Word32Xor) \
V(Word64And) \
V(Word64Equal) \
V(Word64Or) \
V(Word64Sar) \
V(Word64SarShiftOutZeros) \
V(Word64Shl) \
V(Word64Shr) \
V(Word64Xor) \
V(WordAnd) \
V(WordEqual) \
V(WordOr) \
V(WordSar) \
V(WordSarShiftOutZeros) \
V(WordShl) \
V(WordShr) \
V(WordXor)
#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
@@ -284,11 +280,9 @@ class V8_EXPORT_PRIVATE GraphAssembler {
// will maintain the schedule as it updates blocks.
GraphAssembler(
MachineGraph* jsgraph, Zone* zone,
BranchSemantics default_branch_semantics,
base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
bool mark_loop_exits = false);
virtual ~GraphAssembler();
virtual SimplifiedOperatorBuilder* simplified() { UNREACHABLE(); }
void Reset();
void InitializeEffectControl(Node* effect, Node* control);
@@ -328,9 +322,9 @@ class V8_EXPORT_PRIVATE GraphAssembler {
// Value creation.
Node* IntPtrConstant(intptr_t value);
TNode<UintPtrT> UintPtrConstant(uintptr_t value);
Node* UintPtrConstant(uintptr_t value);
Node* Int32Constant(int32_t value);
TNode<Uint32T> Uint32Constant(uint32_t value);
Node* Uint32Constant(uint32_t value);
Node* Int64Constant(int64_t value);
Node* Uint64Constant(uint64_t value);
Node* UniqueIntPtrConstant(intptr_t value);
@@ -349,17 +343,9 @@ class V8_EXPORT_PRIVATE GraphAssembler {
#undef PURE_UNOP_DECL
#define BINOP_DECL(Name) Node* Name(Node* left, Node* right);
#define BINOP_DECL_TNODE(Name, Result, Left, Right) \
TNode<Result> Name(SloppyTNode<Left> left, SloppyTNode<Right> right);
PURE_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL, BINOP_DECL_TNODE)
PURE_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
#undef BINOP_DECL
#undef BINOP_DECL_TNODE
TNode<BoolT> UintPtrLessThanOrEqual(TNode<UintPtrT> left,
TNode<UintPtrT> right);
TNode<UintPtrT> UintPtrAdd(TNode<UintPtrT> left, TNode<UintPtrT> right);
TNode<UintPtrT> UintPtrSub(TNode<UintPtrT> left, TNode<UintPtrT> right);
TNode<UintPtrT> UintPtrDiv(TNode<UintPtrT> left, TNode<UintPtrT> right);
#ifdef V8_MAP_PACKING
Node* PackMapWord(TNode<Map> map);
@@ -399,10 +385,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* BitcastMaybeObjectToWord(Node* value);
Node* TypeGuard(Type type, Node* value);
template <typename T>
TNode<T> TypeGuard(Type type, TNode<T> value) {
return TNode<T>::UncheckedCast(TypeGuard(type, static_cast<Node*>(value)));
}
Node* Checkpoint(FrameState frame_state);
TNode<RawPtrT> StackSlot(int size, int alignment);
@@ -461,16 +443,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
detail::GraphAssemblerLabelForVars<Vars...>* if_true,
detail::GraphAssemblerLabelForVars<Vars...>* if_false,
BranchHint hint, Vars...);
template <typename... Vars>
void MachineBranch(TNode<Word32T> condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
BranchHint hint, Vars...);
template <typename... Vars>
void JSBranch(TNode<Boolean> condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
Vars...);
// Control helpers.
@@ -543,8 +515,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Effect effect() const { return Effect(effect_); }
protected:
constexpr bool Is64() const { return kSystemPointerSize == 8; }
template <typename... Vars>
void MergeState(detail::GraphAssemblerLabelForVars<Vars...>* label,
Vars... vars);
@@ -634,14 +604,13 @@ class V8_EXPORT_PRIVATE GraphAssembler {
class BlockInlineReduction;
template <typename... Vars>
void BranchImpl(BranchSemantics semantics, Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
void BranchImpl(Node* condition,
detail::GraphAssemblerLabelForVars<Vars...>* if_true,
detail::GraphAssemblerLabelForVars<Vars...>* if_false,
BranchHint hint, Vars...);
Zone* temp_zone_;
MachineGraph* mcgraph_;
BranchSemantics default_branch_semantics_;
Node* effect_;
Node* control_;
// {node_changed_callback_} should be called when a node outside the
@@ -819,8 +788,7 @@ void GraphAssembler::Branch(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
BranchImpl(default_branch_semantics_, condition, if_true, if_false, hint,
vars...);
BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
@@ -828,36 +796,17 @@ void GraphAssembler::BranchWithHint(
Node* condition, detail::GraphAssemblerLabelForVars<Vars...>* if_true,
detail::GraphAssemblerLabelForVars<Vars...>* if_false, BranchHint hint,
Vars... vars) {
BranchImpl(default_branch_semantics_, condition, if_true, if_false, hint,
vars...);
BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::MachineBranch(
TNode<Word32T> condition, GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
void GraphAssembler::BranchImpl(
Node* condition, detail::GraphAssemblerLabelForVars<Vars...>* if_true,
detail::GraphAssemblerLabelForVars<Vars...>* if_false, BranchHint hint,
Vars... vars) {
BranchImpl(BranchSemantics::kMachine, condition, if_true, if_false, hint,
vars...);
}
template <typename... Vars>
void GraphAssembler::JSBranch(TNode<Boolean> condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
BranchHint hint, Vars... vars) {
BranchImpl(BranchSemantics::kJS, condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::BranchImpl(BranchSemantics semantics, Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
BranchHint hint, Vars... vars) {
DCHECK_NOT_NULL(control());
Node* branch =
graph()->NewNode(common()->Branch(hint, semantics), condition, control());
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfTrue(), branch);
MergeState(if_true, vars...);
@@ -884,8 +833,7 @@ template <typename... Vars>
void GraphAssembler::GotoIf(Node* condition,
detail::GraphAssemblerLabelForVars<Vars...>* label,
BranchHint hint, Vars... vars) {
Node* branch = graph()->NewNode(
common()->Branch(hint, default_branch_semantics_), condition, control());
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfTrue(), branch);
MergeState(label, vars...);
@@ -897,8 +845,7 @@ template <typename... Vars>
void GraphAssembler::GotoIfNot(
Node* condition, detail::GraphAssemblerLabelForVars<Vars...>* label,
BranchHint hint, Vars... vars) {
Node* branch = graph()->NewNode(
common()->Branch(hint, default_branch_semantics_), condition, control());
Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
control_ = graph()->NewNode(common()->IfFalse(), branch);
MergeState(label, vars...);
@@ -944,16 +891,11 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
// Constructs a JSGraphAssembler. If {schedule} is not null, the graph
// assembler will maintain the schedule as it updates blocks.
JSGraphAssembler(
JSGraph* jsgraph, Zone* zone, BranchSemantics branch_semantics,
JSGraph* jsgraph, Zone* zone,
base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
bool mark_loop_exits = false)
: GraphAssembler(jsgraph, zone, branch_semantics, node_changed_callback,
mark_loop_exits),
jsgraph_(jsgraph),
outermost_catch_scope_(CatchScope::Outermost(zone)),
catch_scope_(&outermost_catch_scope_) {
outermost_catch_scope_.set_gasm(this);
}
: GraphAssembler(jsgraph, zone, node_changed_callback, mark_loop_exits),
jsgraph_(jsgraph) {}
Node* SmiConstant(int32_t value);
TNode<HeapObject> HeapConstant(Handle<HeapObject> object);
@@ -980,7 +922,6 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
// access.machine_type.representation()));
return TNode<T>::UncheckedCast(LoadField(access, object));
}
TNode<Uint32T> LoadElementsKind(TNode<Map> map);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
template <typename T>
TNode<T> LoadElement(ElementAccess const& access, TNode<HeapObject> object,
@@ -1008,8 +949,6 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
TNode<Number> NumberSubtract(TNode<Number> lhs, TNode<Number> rhs);
TNode<Number> NumberShiftRightLogical(TNode<Number> lhs, TNode<Number> rhs);
TNode<Number> NumberBitwiseAnd(TNode<Number> lhs, TNode<Number> rhs);
TNode<Number> NumberDivide(TNode<Number> lhs, TNode<Number> rhs);
TNode<Number> NumberFloor(TNode<Number> value);
TNode<String> StringSubstring(TNode<String> string, TNode<Number> from,
TNode<Number> to);
TNode<Boolean> ObjectIsCallable(TNode<Object> value);
@@ -1028,349 +967,12 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
Node* StringCharCodeAt(TNode<String> string, TNode<Number> position);
TNode<Object> DoubleArrayMax(TNode<JSArray> array);
TNode<Object> DoubleArrayMin(TNode<JSArray> array);
// Computes the byte length for a given {array_buffer_view}. If the set of
// possible ElementsKinds is known statically, pass it as
// {elements_kinds_candidates} to allow the assembler to generate more
// efficient code. Pass an empty {elements_kinds_candidates} to generate code
// that is generic enough to handle all ElementsKinds.
TNode<Number> ArrayBufferViewByteLength(
TNode<JSArrayBufferView> array_buffer_view,
std::set<ElementsKind> elements_kinds_candidates, TNode<Context> context);
// Computes the length for a given {typed_array}. If the set of possible
// ElementsKinds is known statically, pass it as {elements_kinds_candidates} to
// allow the assembler to generate more efficient code. Pass an empty
// {elements_kinds_candidates} to generate code that is generic enough to
// handle all ElementsKinds.
TNode<Number> TypedArrayLength(
TNode<JSTypedArray> typed_array,
std::set<ElementsKind> elements_kinds_candidates, TNode<Context> context);
TNode<Uint32T> LookupByteShiftForElementsKind(TNode<Uint32T> elements_kind);
TNode<Uint32T> LookupByteSizeForElementsKind(TNode<Uint32T> elements_kind);
TNode<Object> JSCallRuntime1(
Runtime::FunctionId function_id, TNode<Object> arg0,
TNode<Context> context, base::Optional<FrameState> frame_state,
Operator::Properties properties = Operator::kNoProperties);
TNode<Object> JSCallRuntime2(Runtime::FunctionId function_id,
TNode<Object> arg0, TNode<Object> arg1,
TNode<Context> context, FrameState frame_state);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return jsgraph()->isolate(); }
SimplifiedOperatorBuilder* simplified() override {
SimplifiedOperatorBuilder* simplified() const {
return jsgraph()->simplified();
}
JSOperatorBuilder* javascript() const { return jsgraph()->javascript(); }
template <typename T, typename U>
TNode<T> EnterMachineGraph(TNode<U> input, UseInfo use_info) {
DCHECK_EQ(use_info.type_check(), TypeCheckKind::kNone);
return AddNode<T>(
graph()->NewNode(common()->EnterMachineGraph(use_info), input));
}
template <typename T, typename U>
TNode<T> ExitMachineGraph(TNode<U> input,
MachineRepresentation output_representation,
Type output_type) {
return AddNode<T>(graph()->NewNode(
common()->ExitMachineGraph(output_representation, output_type), input));
}
// A catch scope represents a single catch handler. The handler can be
// custom catch logic within the reduction itself; or a catch handler in the
// outside graph into which the reduction will be integrated (in this case
// the scope is called 'outermost').
class V8_NODISCARD CatchScope {
private:
// Only used to partially construct the outermost scope.
explicit CatchScope(Zone* zone) : if_exception_nodes_(zone) {}
// For all inner scopes.
CatchScope(Zone* zone, JSGraphAssembler* gasm)
: gasm_(gasm),
parent_(gasm->catch_scope_),
has_handler_(true),
if_exception_nodes_(zone) {
DCHECK_NOT_NULL(gasm_);
gasm_->catch_scope_ = this;
}
public:
~CatchScope() { gasm_->catch_scope_ = parent_; }
static CatchScope Outermost(Zone* zone) { return CatchScope{zone}; }
static CatchScope Inner(Zone* zone, JSGraphAssembler* gasm) {
return {zone, gasm};
}
bool has_handler() const { return has_handler_; }
bool is_outermost() const { return parent_ == nullptr; }
CatchScope* parent() const { return parent_; }
// Should only be used to initialize the outermost scope (inner scopes
// always have a handler and are passed the gasm pointer at construction).
void set_has_handler(bool v) {
DCHECK(is_outermost());
has_handler_ = v;
}
void set_gasm(JSGraphAssembler* v) {
DCHECK(is_outermost());
DCHECK_NOT_NULL(v);
gasm_ = v;
}
bool has_exceptional_control_flow() const {
return !if_exception_nodes_.empty();
}
void RegisterIfExceptionNode(Node* if_exception) {
DCHECK(has_handler());
if_exception_nodes_.push_back(if_exception);
}
void MergeExceptionalPaths(TNode<Object>* exception_out, Effect* effect_out,
Control* control_out) {
DCHECK(has_handler());
DCHECK(has_exceptional_control_flow());
const int size = static_cast<int>(if_exception_nodes_.size());
if (size == 1) {
// No merge needed.
Node* e = if_exception_nodes_.at(0);
*exception_out = TNode<Object>::UncheckedCast(e);
*effect_out = Effect(e);
*control_out = Control(e);
} else {
DCHECK_GT(size, 1);
Node* merge = gasm_->graph()->NewNode(gasm_->common()->Merge(size),
size, if_exception_nodes_.data());
// These phis additionally take {merge} as an input. Temporarily add
// it to the list.
if_exception_nodes_.push_back(merge);
const int size_with_merge =
static_cast<int>(if_exception_nodes_.size());
Node* ephi = gasm_->graph()->NewNode(gasm_->common()->EffectPhi(size),
size_with_merge,
if_exception_nodes_.data());
Node* phi = gasm_->graph()->NewNode(
gasm_->common()->Phi(MachineRepresentation::kTagged, size),
size_with_merge, if_exception_nodes_.data());
if_exception_nodes_.pop_back();
*exception_out = TNode<Object>::UncheckedCast(phi);
*effect_out = Effect(ephi);
*control_out = Control(merge);
}
}
private:
JSGraphAssembler* gasm_ = nullptr;
CatchScope* const parent_ = nullptr;
bool has_handler_ = false;
NodeVector if_exception_nodes_;
};
CatchScope* catch_scope() const { return catch_scope_; }
Node* outermost_handler() const { return outermost_handler_; }
using NodeGenerator0 = std::function<TNode<Object>()>;
// TODO(jgruber): Currently, it's the responsibility of the developer to note
// which operations may throw and appropriately wrap these in a call to
// MayThrow (see e.g. JSCall3 and CallRuntime2). A more methodical approach
// would be good.
TNode<Object> MayThrow(const NodeGenerator0& body) {
TNode<Object> result = body();
if (catch_scope()->has_handler()) {
// The IfException node is later merged into the outer graph.
// Note: AddNode is intentionally not called since effect and control
// should not be updated.
Node* if_exception =
graph()->NewNode(common()->IfException(), effect(), control());
catch_scope()->RegisterIfExceptionNode(if_exception);
// Control resumes here.
AddNode(graph()->NewNode(common()->IfSuccess(), control()));
}
return result;
}
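// Illustrative sketch (not part of this diff): a potentially-throwing
// runtime call is emitted inside the generator passed to MayThrow, so its
// IfException projection gets registered with the active catch scope
// (compare the JSCallRuntime2 declaration above). Operand names are
// placeholders.
TNode<Object> SketchThrowingRuntimeCall(TNode<Object> message_template,
                                        TNode<Object> argument,
                                        TNode<Context> context,
                                        FrameState frame_state) {
  return MayThrow([&]() {
    return AddNode<Object>(graph()->NewNode(
        javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
        message_template, argument, context, frame_state, effect(),
        control()));
  });
}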
using VoidGenerator0 = std::function<void()>;
// TODO(jgruber): Currently IfBuilder0 and IfBuilder1 are implemented as
// separate classes. If, in the future, we encounter additional use cases that
// return more than 1 value, we should merge these back into a single variadic
// implementation.
class IfBuilder0 final {
public:
IfBuilder0(JSGraphAssembler* gasm, TNode<Boolean> cond, bool negate_cond)
: gasm_(gasm),
cond_(cond),
negate_cond_(negate_cond),
initial_effect_(gasm->effect()),
initial_control_(gasm->control()) {}
IfBuilder0& ExpectTrue() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kTrue;
return *this;
}
IfBuilder0& ExpectFalse() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kFalse;
return *this;
}
IfBuilder0& Then(const VoidGenerator0& body) {
then_body_ = body;
return *this;
}
IfBuilder0& Else(const VoidGenerator0& body) {
else_body_ = body;
return *this;
}
~IfBuilder0() {
// Ensure correct usage: effect/control must not have been modified while
// the IfBuilder0 instance is alive.
DCHECK_EQ(gasm_->effect(), initial_effect_);
DCHECK_EQ(gasm_->control(), initial_control_);
// Unlike IfBuilder1, this supports an empty then or else body. This is
// possible since the merge does not take any value inputs.
DCHECK(then_body_ || else_body_);
if (negate_cond_) std::swap(then_body_, else_body_);
auto if_true = (hint_ == BranchHint::kFalse) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto if_false = (hint_ == BranchHint::kTrue) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto merge = gasm_->MakeLabel();
gasm_->Branch(cond_, &if_true, &if_false);
gasm_->Bind(&if_true);
if (then_body_) then_body_();
if (gasm_->HasActiveBlock()) gasm_->Goto(&merge);
gasm_->Bind(&if_false);
if (else_body_) else_body_();
if (gasm_->HasActiveBlock()) gasm_->Goto(&merge);
gasm_->Bind(&merge);
}
IfBuilder0(const IfBuilder0&) = delete;
IfBuilder0& operator=(const IfBuilder0&) = delete;
private:
JSGraphAssembler* const gasm_;
const TNode<Boolean> cond_;
const bool negate_cond_;
const Effect initial_effect_;
const Control initial_control_;
BranchHint hint_ = BranchHint::kNone;
VoidGenerator0 then_body_;
VoidGenerator0 else_body_;
};
IfBuilder0 If(TNode<Boolean> cond) { return {this, cond, false}; }
IfBuilder0 IfNot(TNode<Boolean> cond) { return {this, cond, true}; }
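// Illustrative usage sketch (not part of this diff): the builder emits the
// branch, both bodies, and the merge when the temporary dies at the end of
// the full statement, so it is used fluently. Condition and bodies are
// placeholders; ExpectFalse makes the then-branch the deferred path.
void SketchIfBuilder0Usage(TNode<Boolean> is_detached) {
  If(is_detached)
      .ExpectFalse()
      .Then([&]() { /* deferred path, e.g. bailout logic */ })
      .Else([&]() { /* fast path */ });
}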
template <typename T, typename Cond>
class IfBuilder1 {
using If1BodyFunction = std::function<TNode<T>()>;
public:
IfBuilder1(JSGraphAssembler* gasm, TNode<Cond> cond, bool negate_cond)
: gasm_(gasm), cond_(cond), negate_cond_(negate_cond) {}
V8_WARN_UNUSED_RESULT IfBuilder1& ExpectTrue() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kTrue;
return *this;
}
V8_WARN_UNUSED_RESULT IfBuilder1& ExpectFalse() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kFalse;
return *this;
}
V8_WARN_UNUSED_RESULT IfBuilder1& Then(const If1BodyFunction& body) {
then_body_ = body;
return *this;
}
V8_WARN_UNUSED_RESULT IfBuilder1& Else(const If1BodyFunction& body) {
else_body_ = body;
return *this;
}
V8_WARN_UNUSED_RESULT TNode<T> Value() {
DCHECK(then_body_);
DCHECK(else_body_);
if (negate_cond_) std::swap(then_body_, else_body_);
auto if_true = (hint_ == BranchHint::kFalse) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto if_false = (hint_ == BranchHint::kTrue) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto merge = gasm_->MakeLabel(PhiMachineRepresentationOf<T>);
if constexpr (std::is_same_v<Cond, Word32T>) {
gasm_->MachineBranch(cond_, &if_true, &if_false, hint_);
} else {
static_assert(std::is_same_v<Cond, Boolean>);
if (hint_ != BranchHint::kNone) {
gasm_->BranchWithHint(cond_, &if_true, &if_false, hint_);
} else {
gasm_->Branch(cond_, &if_true, &if_false);
}
}
gasm_->Bind(&if_true);
TNode<T> then_result = then_body_();
if (gasm_->HasActiveBlock()) gasm_->Goto(&merge, then_result);
gasm_->Bind(&if_false);
TNode<T> else_result = else_body_();
if (gasm_->HasActiveBlock()) {
gasm_->Goto(&merge, else_result);
}
gasm_->Bind(&merge);
return merge.template PhiAt<T>(0);
}
private:
static constexpr MachineRepresentation kPhiRepresentation =
MachineRepresentation::kTagged;
JSGraphAssembler* const gasm_;
const TNode<Cond> cond_;
const bool negate_cond_;
BranchHint hint_ = BranchHint::kNone;
If1BodyFunction then_body_;
If1BodyFunction else_body_;
};
template <typename T>
IfBuilder1<T, Boolean> SelectIf(TNode<Boolean> cond) {
return {this, cond, false};
}
template <typename T>
IfBuilder1<T, Boolean> SelectIfNot(TNode<Boolean> cond) {
return {this, cond, true};
}
template <typename T>
IfBuilder1<T, Word32T> MachineSelectIf(TNode<Word32T> cond) {
return {this, cond, false};
}
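// Illustrative usage sketch (not part of this diff): unlike IfBuilder0,
// IfBuilder1 produces a value through the merging Phi, so the chain must end
// in Value() to materialize the subgraph. Inputs are placeholders.
TNode<Number> SketchClamp(TNode<Boolean> below_limit, TNode<Number> value,
                          TNode<Number> limit) {
  return SelectIf<Number>(below_limit)
      .Then([&]() -> TNode<Number> { return value; })
      .Else([&]() -> TNode<Number> { return limit; })
      .ExpectTrue()
      .Value();
}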
protected:
Operator const* PlainPrimitiveToNumberOperator();
@ -1378,12 +980,6 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
private:
JSGraph* jsgraph_;
SetOncePointer<Operator const> to_number_operator_;
protected:
CatchScope outermost_catch_scope_;
Node* outermost_handler_;
CatchScope* catch_scope_;
friend class CatchScope;
};
} // namespace compiler


@ -4,8 +4,6 @@
#include "src/compiler/heap-refs.h"
#include "src/objects/elements-kind.h"
#ifdef ENABLE_SLOW_DCHECKS
#include <algorithm>
#endif
@ -1081,11 +1079,6 @@ bool MapRef::CanInlineElementAccess() const {
kind != BIGINT64_ELEMENTS) {
return true;
}
if (v8_flags.turbo_rab_gsab && IsRabGsabTypedArrayElementsKind(kind) &&
kind != RAB_GSAB_BIGUINT64_ELEMENTS &&
kind != RAB_GSAB_BIGINT64_ELEMENTS) {
return true;
}
return false;
}


@ -6,7 +6,6 @@
#include <functional>
#include "src/base/container-utils.h"
#include "src/base/small-vector.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-utils.h"
@ -29,11 +28,8 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/use-info.h"
#include "src/flags/flags.h"
#include "src/ic/call-optimization.h"
#include "src/objects/elements-kind.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-function.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
@ -50,26 +46,32 @@ namespace compiler {
#define _ [&]()
class JSCallReducerAssembler : public JSGraphAssembler {
protected:
class CatchScope;
private:
static constexpr bool kMarkLoopExits = true;
public:
JSCallReducerAssembler(JSCallReducer* reducer, Node* node,
Node* effect = nullptr, Node* control = nullptr)
JSCallReducerAssembler(JSCallReducer* reducer, Node* node)
: JSGraphAssembler(
reducer->JSGraphForGraphAssembler(),
reducer->ZoneForGraphAssembler(), BranchSemantics::kJS,
reducer->ZoneForGraphAssembler(),
[reducer](Node* n) { reducer->RevisitForGraphAssembler(n); },
kMarkLoopExits),
dependencies_(reducer->dependencies()),
node_(node) {
InitializeEffectControl(
effect ? effect : NodeProperties::GetEffectInput(node),
control ? control : NodeProperties::GetControlInput(node));
node_(node),
outermost_catch_scope_(
CatchScope::Outermost(reducer->ZoneForGraphAssembler())),
catch_scope_(&outermost_catch_scope_) {
InitializeEffectControl(NodeProperties::GetEffectInput(node),
NodeProperties::GetControlInput(node));
// Finish initializing the outermost catch scope.
bool has_handler =
NodeProperties::IsExceptionalCall(node, &outermost_handler_);
outermost_catch_scope_.set_has_handler(has_handler);
outermost_catch_scope_.set_gasm(this);
}
TNode<Object> ReduceJSCallWithArrayLikeOrSpreadOfEmpty(
@ -92,8 +94,164 @@ class JSCallReducerAssembler : public JSGraphAssembler {
TNode<Object> ReceiverInput() const { return ReceiverInputAs<Object>(); }
CatchScope* catch_scope() const { return catch_scope_; }
Node* outermost_handler() const { return outermost_handler_; }
Node* node_ptr() const { return node_; }
protected:
using NodeGenerator0 = std::function<TNode<Object>()>;
using VoidGenerator0 = std::function<void()>;
// TODO(jgruber): Currently IfBuilder0 and IfBuilder1 are implemented as
// separate classes. If, in the future, we encounter additional use cases that
// return more than 1 value, we should merge these back into a single variadic
// implementation.
class IfBuilder0 final {
public:
IfBuilder0(JSGraphAssembler* gasm, TNode<Boolean> cond, bool negate_cond)
: gasm_(gasm),
cond_(cond),
negate_cond_(negate_cond),
initial_effect_(gasm->effect()),
initial_control_(gasm->control()) {}
IfBuilder0& ExpectTrue() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kTrue;
return *this;
}
IfBuilder0& ExpectFalse() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kFalse;
return *this;
}
IfBuilder0& Then(const VoidGenerator0& body) {
then_body_ = body;
return *this;
}
IfBuilder0& Else(const VoidGenerator0& body) {
else_body_ = body;
return *this;
}
~IfBuilder0() {
// Ensure correct usage: effect/control must not have been modified while
// the IfBuilder0 instance is alive.
DCHECK_EQ(gasm_->effect(), initial_effect_);
DCHECK_EQ(gasm_->control(), initial_control_);
// Unlike IfBuilder1, this supports an empty then or else body. This is
// possible since the merge does not take any value inputs.
DCHECK(then_body_ || else_body_);
if (negate_cond_) std::swap(then_body_, else_body_);
auto if_true = (hint_ == BranchHint::kFalse) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto if_false = (hint_ == BranchHint::kTrue) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto merge = gasm_->MakeLabel();
gasm_->Branch(cond_, &if_true, &if_false);
gasm_->Bind(&if_true);
if (then_body_) then_body_();
if (gasm_->HasActiveBlock()) gasm_->Goto(&merge);
gasm_->Bind(&if_false);
if (else_body_) else_body_();
if (gasm_->HasActiveBlock()) gasm_->Goto(&merge);
gasm_->Bind(&merge);
}
IfBuilder0(const IfBuilder0&) = delete;
IfBuilder0& operator=(const IfBuilder0&) = delete;
private:
JSGraphAssembler* const gasm_;
const TNode<Boolean> cond_;
const bool negate_cond_;
const Effect initial_effect_;
const Control initial_control_;
BranchHint hint_ = BranchHint::kNone;
VoidGenerator0 then_body_;
VoidGenerator0 else_body_;
};
IfBuilder0 If(TNode<Boolean> cond) { return {this, cond, false}; }
IfBuilder0 IfNot(TNode<Boolean> cond) { return {this, cond, true}; }
template <typename T>
class IfBuilder1 {
using If1BodyFunction = std::function<TNode<T>()>;
public:
IfBuilder1(JSGraphAssembler* gasm, TNode<Boolean> cond)
: gasm_(gasm), cond_(cond) {}
V8_WARN_UNUSED_RESULT IfBuilder1& ExpectTrue() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kTrue;
return *this;
}
V8_WARN_UNUSED_RESULT IfBuilder1& ExpectFalse() {
DCHECK_EQ(hint_, BranchHint::kNone);
hint_ = BranchHint::kFalse;
return *this;
}
V8_WARN_UNUSED_RESULT IfBuilder1& Then(const If1BodyFunction& body) {
then_body_ = body;
return *this;
}
V8_WARN_UNUSED_RESULT IfBuilder1& Else(const If1BodyFunction& body) {
else_body_ = body;
return *this;
}
V8_WARN_UNUSED_RESULT TNode<T> Value() {
DCHECK(then_body_);
DCHECK(else_body_);
auto if_true = (hint_ == BranchHint::kFalse) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto if_false = (hint_ == BranchHint::kTrue) ? gasm_->MakeDeferredLabel()
: gasm_->MakeLabel();
auto merge = gasm_->MakeLabel(kPhiRepresentation);
gasm_->Branch(cond_, &if_true, &if_false);
gasm_->Bind(&if_true);
TNode<T> then_result = then_body_();
if (gasm_->HasActiveBlock()) gasm_->Goto(&merge, then_result);
gasm_->Bind(&if_false);
TNode<T> else_result = else_body_();
if (gasm_->HasActiveBlock()) {
gasm_->Goto(&merge, else_result);
}
gasm_->Bind(&merge);
return merge.PhiAt<T>(0);
}
private:
static constexpr MachineRepresentation kPhiRepresentation =
MachineRepresentation::kTagged;
JSGraphAssembler* const gasm_;
const TNode<Boolean> cond_;
BranchHint hint_ = BranchHint::kNone;
If1BodyFunction then_body_;
If1BodyFunction else_body_;
};
template <typename T>
IfBuilder1<T> SelectIf(TNode<Boolean> cond) {
return {this, cond};
}
// Simplified operators.
TNode<Number> SpeculativeToNumber(
TNode<Object> value,
@ -119,6 +277,9 @@ class JSCallReducerAssembler : public JSGraphAssembler {
TNode<Object> arg0, TNode<Object> arg1,
TNode<Object> arg2, TNode<Object> arg3,
FrameState frame_state);
TNode<Object> JSCallRuntime2(Runtime::FunctionId function_id,
TNode<Object> arg0, TNode<Object> arg1,
FrameState frame_state);
// Emplace a copy of the call node into the graph at current effect/control.
TNode<Object> CopyNode();
@ -136,20 +297,6 @@ class JSCallReducerAssembler : public JSGraphAssembler {
TNode<Number> LoadMapElementsKind(TNode<Map> map);
template <typename T, typename U>
TNode<T> EnterMachineGraph(TNode<U> input, UseInfo use_info) {
return AddNode<T>(
graph()->NewNode(common()->EnterMachineGraph(use_info), input));
}
template <typename T, typename U>
TNode<T> ExitMachineGraph(TNode<U> input,
MachineRepresentation output_representation,
Type output_type) {
return AddNode<T>(graph()->NewNode(
common()->ExitMachineGraph(output_representation, output_type), input));
}
void MaybeInsertMapChecks(MapInference* inference,
bool has_stability_dependency) {
// TODO(jgruber): Implement MapInference::InsertMapChecks in graph
@ -161,6 +308,124 @@ class JSCallReducerAssembler : public JSGraphAssembler {
}
}
// TODO(jgruber): Currently, it's the responsibility of the developer to note
// which operations may throw and appropriately wrap these in a call to
// MayThrow (see e.g. JSCall3 and CallRuntime2). A more methodical approach
// would be good.
TNode<Object> MayThrow(const NodeGenerator0& body) {
TNode<Object> result = body();
if (catch_scope()->has_handler()) {
// The IfException node is later merged into the outer graph.
// Note: AddNode is intentionally not called since effect and control
// should not be updated.
Node* if_exception =
graph()->NewNode(common()->IfException(), effect(), control());
catch_scope()->RegisterIfExceptionNode(if_exception);
// Control resumes here.
AddNode(graph()->NewNode(common()->IfSuccess(), control()));
}
return result;
}
// A catch scope represents a single catch handler. The handler can be
// custom catch logic within the reduction itself; or a catch handler in the
// outside graph into which the reduction will be integrated (in this case
// the scope is called 'outermost').
class V8_NODISCARD CatchScope {
private:
// Only used to partially construct the outermost scope.
explicit CatchScope(Zone* zone) : if_exception_nodes_(zone) {}
// For all inner scopes.
CatchScope(Zone* zone, JSCallReducerAssembler* gasm)
: gasm_(gasm),
parent_(gasm->catch_scope_),
has_handler_(true),
if_exception_nodes_(zone) {
gasm_->catch_scope_ = this;
}
public:
~CatchScope() { gasm_->catch_scope_ = parent_; }
static CatchScope Outermost(Zone* zone) { return CatchScope{zone}; }
static CatchScope Inner(Zone* zone, JSCallReducerAssembler* gasm) {
return {zone, gasm};
}
bool has_handler() const { return has_handler_; }
bool is_outermost() const { return parent_ == nullptr; }
CatchScope* parent() const { return parent_; }
// Should only be used to initialize the outermost scope (inner scopes
// always have a handler and are passed the gasm pointer at construction).
void set_has_handler(bool v) {
DCHECK(is_outermost());
has_handler_ = v;
}
void set_gasm(JSCallReducerAssembler* v) {
DCHECK(is_outermost());
gasm_ = v;
}
bool has_exceptional_control_flow() const {
return !if_exception_nodes_.empty();
}
void RegisterIfExceptionNode(Node* if_exception) {
DCHECK(has_handler());
if_exception_nodes_.push_back(if_exception);
}
void MergeExceptionalPaths(TNode<Object>* exception_out, Effect* effect_out,
Control* control_out) {
DCHECK(has_handler());
DCHECK(has_exceptional_control_flow());
const int size = static_cast<int>(if_exception_nodes_.size());
if (size == 1) {
// No merge needed.
Node* e = if_exception_nodes_.at(0);
*exception_out = TNode<Object>::UncheckedCast(e);
*effect_out = Effect(e);
*control_out = Control(e);
} else {
DCHECK_GT(size, 1);
Node* merge = gasm_->graph()->NewNode(gasm_->common()->Merge(size),
size, if_exception_nodes_.data());
// These phis additionally take {merge} as an input. Temporarily add
// it to the list.
if_exception_nodes_.push_back(merge);
const int size_with_merge =
static_cast<int>(if_exception_nodes_.size());
Node* ephi = gasm_->graph()->NewNode(gasm_->common()->EffectPhi(size),
size_with_merge,
if_exception_nodes_.data());
Node* phi = gasm_->graph()->NewNode(
gasm_->common()->Phi(MachineRepresentation::kTagged, size),
size_with_merge, if_exception_nodes_.data());
if_exception_nodes_.pop_back();
*exception_out = TNode<Object>::UncheckedCast(phi);
*effect_out = Effect(ephi);
*control_out = Control(merge);
}
}
private:
JSCallReducerAssembler* gasm_ = nullptr;
CatchScope* const parent_ = nullptr;
bool has_handler_ = false;
NodeVector if_exception_nodes_;
};
class TryCatchBuilder0 {
public:
using TryFunction = VoidGenerator0;
@ -351,7 +616,7 @@ class JSCallReducerAssembler : public JSGraphAssembler {
JSCallRuntime2(Runtime::kThrowTypeError,
NumberConstant(static_cast<double>(
MessageTemplate::kCalledNonCallable)),
maybe_callable, ContextInput(), frame_state);
maybe_callable, frame_state);
Unreachable(); // The runtime call throws unconditionally.
})
.ExpectTrue();
@ -397,11 +662,17 @@ class JSCallReducerAssembler : public JSGraphAssembler {
return FrameState(NodeProperties::GetFrameStateInput(node_));
}
JSOperatorBuilder* javascript() const { return jsgraph()->javascript(); }
CompilationDependencies* dependencies() const { return dependencies_; }
private:
CompilationDependencies* const dependencies_;
Node* const node_;
CatchScope outermost_catch_scope_;
Node* outermost_handler_;
CatchScope* catch_scope_;
friend class CatchScope;
};
enum class ArrayReduceDirection { kLeft, kRight };
@ -842,6 +1113,16 @@ TNode<Object> JSCallReducerAssembler::JSCall4(
});
}
TNode<Object> JSCallReducerAssembler::JSCallRuntime2(
Runtime::FunctionId function_id, TNode<Object> arg0, TNode<Object> arg1,
FrameState frame_state) {
return MayThrow(_ {
return AddNode<Object>(
graph()->NewNode(javascript()->CallRuntime(function_id, 2), arg0, arg1,
ContextInput(), frame_state, effect(), control()));
});
}
TNode<Object> JSCallReducerAssembler::CopyNode() {
return MayThrow(_ {
Node* copy = graph()->CloneNode(node_ptr());
@ -857,7 +1138,6 @@ TNode<JSArray> JSCallReducerAssembler::CreateArrayNoThrow(
graph()->NewNode(javascript()->CreateArray(1, base::nullopt), ctor, ctor,
size, ContextInput(), frame_state, effect(), control()));
}
TNode<JSArray> JSCallReducerAssembler::AllocateEmptyJSArray(
ElementsKind kind, const NativeContextRef& native_context) {
// TODO(jgruber): Port AllocationBuilder to JSGraphAssembler.
@ -2298,26 +2578,6 @@ TNode<Object> PromiseBuiltinReducerAssembler::ReducePromiseConstructor(
#undef _
std::pair<Node*, Node*> JSCallReducer::ReleaseEffectAndControlFromAssembler(
JSCallReducerAssembler* gasm) {
auto catch_scope = gasm->catch_scope();
DCHECK(catch_scope->is_outermost());
if (catch_scope->has_handler() &&
catch_scope->has_exceptional_control_flow()) {
TNode<Object> handler_exception;
Effect handler_effect{nullptr};
Control handler_control{nullptr};
gasm->catch_scope()->MergeExceptionalPaths(
&handler_exception, &handler_effect, &handler_control);
ReplaceWithValue(gasm->outermost_handler(), handler_exception,
handler_effect, handler_control);
}
return {gasm->effect(), gasm->control()};
}
Reduction JSCallReducer::ReplaceWithSubgraph(JSCallReducerAssembler* gasm,
Node* subgraph) {
// TODO(jgruber): Consider a less fiddly way of integrating the new subgraph
@ -4638,11 +4898,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtin::kArrayBufferIsView:
return ReduceArrayBufferIsView(node);
case Builtin::kDataViewPrototypeGetByteLength:
return ReduceArrayBufferViewByteLengthAccessor(node, JS_DATA_VIEW_TYPE);
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
AccessBuilder::ForJSArrayBufferViewByteLength());
case Builtin::kDataViewPrototypeGetByteOffset:
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
AccessBuilder::ForJSArrayBufferViewByteOffset(), builtin);
AccessBuilder::ForJSArrayBufferViewByteOffset());
case Builtin::kDataViewPrototypeGetUint8:
return ReduceDataViewAccess(node, DataViewAccess::kGet,
ExternalArrayType::kExternalUint8Array);
@ -4692,13 +4954,16 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDataViewAccess(node, DataViewAccess::kSet,
ExternalArrayType::kExternalFloat64Array);
case Builtin::kTypedArrayPrototypeByteLength:
return ReduceArrayBufferViewByteLengthAccessor(node, JS_TYPED_ARRAY_TYPE);
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE,
AccessBuilder::ForJSArrayBufferViewByteLength());
case Builtin::kTypedArrayPrototypeByteOffset:
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE,
AccessBuilder::ForJSArrayBufferViewByteOffset(), builtin);
AccessBuilder::ForJSArrayBufferViewByteOffset());
case Builtin::kTypedArrayPrototypeLength:
return ReduceTypedArrayPrototypeLength(node);
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
case Builtin::kTypedArrayPrototypeToStringTag:
return ReduceTypedArrayPrototypeToStringTag(node);
case Builtin::kMathAbs:
@ -7232,113 +7497,6 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
return Replace(value);
}
Reduction JSCallReducer::ReduceArrayBufferViewByteLengthAccessor(
Node* node, InstanceType instance_type) {
DCHECK(instance_type == JS_TYPED_ARRAY_TYPE ||
instance_type == JS_DATA_VIEW_TYPE);
Node* receiver = NodeProperties::GetValueInput(node, 1);
Effect effect{NodeProperties::GetEffectInput(node)};
Control control{NodeProperties::GetControlInput(node)};
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps() ||
!inference.AllOfInstanceTypesAre(instance_type)) {
return inference.NoChange();
}
std::set<ElementsKind> elements_kinds;
bool maybe_rab_gsab = false;
if (instance_type == JS_DATA_VIEW_TYPE) {
maybe_rab_gsab = true;
} else {
for (const auto& map : inference.GetMaps()) {
ElementsKind kind = map.elements_kind();
elements_kinds.insert(kind);
if (IsRabGsabTypedArrayElementsKind(kind)) maybe_rab_gsab = true;
}
}
if (!v8_flags.harmony_rab_gsab || !maybe_rab_gsab) {
// We do not perform any change depending on this inference.
Reduction unused_reduction = inference.NoChange();
USE(unused_reduction);
// Call default implementation for non-rab/gsab TAs.
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE,
AccessBuilder::ForJSArrayBufferViewByteLength(),
Builtin::kTypedArrayPrototypeByteLength);
} else if (!v8_flags.turbo_rab_gsab) {
return inference.NoChange();
}
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control,
CallParametersOf(node->op()).feedback());
const bool depended_on_detaching_protector =
dependencies()->DependOnArrayBufferDetachingProtector();
if (!depended_on_detaching_protector && instance_type == JS_DATA_VIEW_TYPE) {
// DataView prototype accessors throw on detached ArrayBuffers instead of
// returning 0, so skip the optimization.
//
// TODO(turbofan): Ideally we would bail out if the buffer is actually
// detached.
return inference.NoChange();
}
JSCallReducerAssembler a(this, node);
TNode<JSTypedArray> typed_array =
TNode<JSTypedArray>::UncheckedCast(receiver);
TNode<Number> length = a.ArrayBufferViewByteLength(
typed_array, std::move(elements_kinds), a.ContextInput());
return ReplaceWithSubgraph(&a, length);
}
Reduction JSCallReducer::ReduceTypedArrayPrototypeLength(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Effect effect{NodeProperties::GetEffectInput(node)};
Control control{NodeProperties::GetControlInput(node)};
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps() ||
!inference.AllOfInstanceTypesAre(JS_TYPED_ARRAY_TYPE)) {
return inference.NoChange();
}
std::set<ElementsKind> elements_kinds;
bool maybe_rab_gsab = false;
for (const auto& map : inference.GetMaps()) {
ElementsKind kind = map.elements_kind();
elements_kinds.insert(kind);
if (IsRabGsabTypedArrayElementsKind(kind)) maybe_rab_gsab = true;
}
if (!v8_flags.harmony_rab_gsab || !maybe_rab_gsab) {
// We do not perform any change depending on this inference.
Reduction unused_reduction = inference.NoChange();
USE(unused_reduction);
// Call default implementation for non-rab/gsab TAs.
return ReduceArrayBufferViewAccessor(node, JS_TYPED_ARRAY_TYPE,
AccessBuilder::ForJSTypedArrayLength(),
Builtin::kTypedArrayPrototypeLength);
} else if (!v8_flags.turbo_rab_gsab) {
return inference.NoChange();
}
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control,
CallParametersOf(node->op()).feedback());
JSCallReducerAssembler a(this, node);
TNode<JSTypedArray> typed_array =
TNode<JSTypedArray>::UncheckedCast(receiver);
TNode<Number> length = a.TypedArrayLength(
typed_array, std::move(elements_kinds), a.ContextInput());
return ReplaceWithSubgraph(&a, length);
}
// ES #sec-number.isfinite
Reduction JSCallReducer::ReduceNumberIsFinite(Node* node) {
JSCallNode n(node);
@ -7847,8 +8005,7 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
}
Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access,
Builtin builtin) {
Node* node, InstanceType instance_type, FieldAccess const& access) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Effect effect{NodeProperties::GetEffectInput(node)};
Control control{NodeProperties::GetControlInput(node)};
@ -7859,11 +8016,13 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
return inference.NoChange();
}
DCHECK_IMPLIES((builtin == Builtin::kTypedArrayPrototypeLength ||
builtin == Builtin::kTypedArrayPrototypeByteLength),
base::none_of(inference.GetMaps(), [](const auto& map) {
return IsRabGsabTypedArrayElementsKind(map.elements_kind());
}));
// TODO(v8:11111): We skip this optimization for RAB/GSAB for now. Should
// have some optimization here eventually.
for (const auto& map : inference.GetMaps()) {
if (IsRabGsabTypedArrayElementsKind(map.elements_kind())) {
return inference.NoChange();
}
}
CHECK(inference.RelyOnMapsViaStability(dependencies()));
@ -7969,20 +8128,11 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
offset = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
offset, byte_length, effect, control);
} else {
Node* byte_length;
if (!v8_flags.harmony_rab_gsab) {
// We only deal with DataViews here that have Smi [[ByteLength]]s.
byte_length = effect =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayBufferViewByteLength()),
receiver, effect, control);
} else {
JSCallReducerAssembler a(this, node);
byte_length = a.ArrayBufferViewByteLength(
TNode<JSArrayBufferView>::UncheckedCast(receiver), {},
a.ContextInput());
std::tie(effect, control) = ReleaseEffectAndControlFromAssembler(&a);
}
// We only deal with DataViews here that have Smi [[ByteLength]]s.
Node* byte_length = effect =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayBufferViewByteLength()),
receiver, effect, control);
if (element_size > 1) {
// For non-byte accesses we also need to check that the {offset}


@ -181,9 +181,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceTypedArrayConstructor(Node* node,
const SharedFunctionInfoRef& shared);
Reduction ReduceTypedArrayPrototypeToStringTag(Node* node);
Reduction ReduceArrayBufferViewByteLengthAccessor(Node* node,
InstanceType instance_type);
Reduction ReduceTypedArrayPrototypeLength(Node* node);
Reduction ReduceForInsufficientFeedback(Node* node, DeoptimizeReason reason);
@ -219,8 +216,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayBufferIsView(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
FieldAccess const& access,
Builtin builtin);
FieldAccess const& access);
enum class DataViewAccess { kGet, kSet };
Reduction ReduceDataViewAccess(Node* node, DataViewAccess access,
@ -238,8 +234,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
// The pendant to ReplaceWithValue when using GraphAssembler-based reductions.
Reduction ReplaceWithSubgraph(JSCallReducerAssembler* gasm, Node* subgraph);
std::pair<Node*, Node*> ReleaseEffectAndControlFromAssembler(
JSCallReducerAssembler* gasm);
// Helper to verify promise receiver maps are as expected.
// On bailout from a reduction, be sure to return inference.NoChange().


@ -14,7 +14,6 @@
#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
@ -25,11 +24,9 @@
#include "src/compiler/property-access-builder.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/elements-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/heap-number.h"
#include "src/objects/string.h"
@ -2095,7 +2092,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* receiver = NodeProperties::GetValueInput(node, 0);
Effect effect{NodeProperties::GetEffectInput(node)};
Control control{NodeProperties::GetControlInput(node)};
Node* context = NodeProperties::GetContextInput(node);
// TODO(neis): It's odd that we do optimizations below that don't really care
// about the feedback, but we don't do them when the feedback is megamorphic.
@ -2205,8 +2201,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Access the actual element.
ValueEffectControl continuation =
BuildElementAccess(receiver, index, value, effect, control, context,
access_info, feedback.keyed_mode());
BuildElementAccess(receiver, index, value, effect, control, access_info,
feedback.keyed_mode());
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@ -2270,9 +2266,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Access the actual element.
ValueEffectControl continuation = BuildElementAccess(
this_receiver, this_index, this_value, this_effect, this_control,
context, access_info, feedback.keyed_mode());
ValueEffectControl continuation =
BuildElementAccess(this_receiver, this_index, this_value, this_effect,
this_control, access_info, feedback.keyed_mode());
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@ -3090,7 +3086,6 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
switch (kind) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
case RAB_GSAB_##TYPE##_ELEMENTS: \
return kExternal##Type##Array;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@ -3105,18 +3100,14 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
Node* context, ElementAccessInfo const& access_info,
KeyedAccessMode const& keyed_mode) {
ElementAccessInfo const& access_info, KeyedAccessMode const& keyed_mode) {
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
DCHECK_IMPLIES(IsRabGsabTypedArrayElementsKind(elements_kind),
v8_flags.turbo_rab_gsab);
ZoneVector<MapRef> const& receiver_maps =
access_info.lookup_start_object_maps();
if (IsTypedArrayElementsKind(elements_kind) ||
IsRabGsabTypedArrayElementsKind(elements_kind)) {
if (IsTypedArrayElementsKind(elements_kind)) {
Node* buffer_or_receiver = receiver;
Node* length;
Node* base_pointer;
@ -3126,9 +3117,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// for asm.js-like code patterns).
base::Optional<JSTypedArrayRef> typed_array =
GetTypedArrayConstant(broker(), receiver);
if (typed_array.has_value() &&
!IsRabGsabTypedArrayElementsKind(elements_kind)) {
// TODO(v8:11111): Add support for rab/gsab here.
if (typed_array.has_value()) {
length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
DCHECK(!typed_array->is_on_heap());
@ -3141,14 +3130,9 @@ JSNativeContextSpecialization::BuildElementAccess(
external_pointer = jsgraph()->PointerConstant(typed_array->data_ptr());
} else {
// Load the {receiver}s length.
JSGraphAssembler assembler(jsgraph_, zone(), BranchSemantics::kJS,
[this](Node* n) { this->Revisit(n); });
assembler.InitializeEffectControl(effect, control);
length = assembler.TypedArrayLength(
TNode<JSTypedArray>::UncheckedCast(receiver), {elements_kind},
TNode<Context>::UncheckedCast(context));
std::tie(effect, control) =
ReleaseEffectAndControlFromAssembler(&assembler);
length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
receiver, effect, control);
// Load the base pointer for the {receiver}. This will always be Smi
// zero unless we allow on-heap TypedArrays, which is only the case
@ -3919,27 +3903,6 @@ Node* JSNativeContextSpecialization::BuildLoadPrototypeFromObject(
control);
}
std::pair<Node*, Node*>
JSNativeContextSpecialization::ReleaseEffectAndControlFromAssembler(
JSGraphAssembler* gasm) {
auto catch_scope = gasm->catch_scope();
DCHECK(catch_scope->is_outermost());
if (catch_scope->has_handler() &&
catch_scope->has_exceptional_control_flow()) {
TNode<Object> handler_exception;
Effect handler_effect{nullptr};
Control handler_control{nullptr};
gasm->catch_scope()->MergeExceptionalPaths(
&handler_exception, &handler_effect, &handler_control);
ReplaceWithValue(gasm->outermost_handler(), handler_exception,
handler_effect, handler_control);
}
return {gasm->effect(), gasm->control()};
}
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}


@ -7,7 +7,6 @@
#include "src/base/flags.h"
#include "src/base/optional.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-heap-broker.h"
#include "src/deoptimizer/deoptimize-reason.h"
@ -190,7 +189,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// Construct the appropriate subgraph for element access.
ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
Node* value, Node* effect,
Node* control, Node* context,
Node* control,
ElementAccessInfo const& access_info,
KeyedAccessMode const& keyed_mode);
@ -250,9 +249,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Node* BuildLoadPrototypeFromObject(Node* object, Node* effect, Node* control);
std::pair<Node*, Node*> ReleaseEffectAndControlFromAssembler(
JSGraphAssembler* assembler);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }


@ -916,21 +916,23 @@ const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
return CallRuntime(f, f->nargs);
}
const Operator* JSOperatorBuilder::CallRuntime(
Runtime::FunctionId id, size_t arity, Operator::Properties properties) {
const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
size_t arity) {
const Runtime::Function* f = Runtime::FunctionForId(id);
return CallRuntime(f, arity, properties);
return CallRuntime(f, arity);
}
const Operator* JSOperatorBuilder::CallRuntime(
const Runtime::Function* f, size_t arity, Operator::Properties properties) {
const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
size_t arity) {
CallRuntimeParameters parameters(f->function_id, arity);
DCHECK(f->nargs == -1 || f->nargs == static_cast<int>(parameters.arity()));
return zone()->New<Operator1<CallRuntimeParameters>>( // --
IrOpcode::kJSCallRuntime, properties, // opcode
"JSCallRuntime", // name
parameters.arity(), 1, 1, f->result_size, 1, 2, // inputs/outputs
parameters); // parameter
return zone()->New<Operator1<CallRuntimeParameters>>( // --
IrOpcode::kJSCallRuntime, Operator::kNoProperties, // opcode
"JSCallRuntime", // name
parameters.arity(), 1, 1, f->result_size, 1, 2, // inputs/outputs
parameters); // parameter
}
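// Illustrative sketch (not part of this diff): the (reverted) Properties
// parameter let a caller mark a runtime call as non-deopting and
// non-throwing, which in turn allowed Linkage::NeedsFrameStateInput to skip
// the frame state for it. The runtime function below is only an example:
//
//   const Operator* op = javascript()->CallRuntime(
//       Runtime::kGrowableSharedArrayBufferByteLength, 1,
//       Operator::kNoDeopt | Operator::kNoThrow);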
#if V8_ENABLE_WEBASSEMBLY


@ -13,7 +13,6 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator-properties.h"
#include "src/objects/feedback-cell.h"
#include "src/runtime/runtime.h"
@ -996,12 +995,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation,
CallFeedbackRelation feedback_relation = CallFeedbackRelation::kTarget);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(
Runtime::FunctionId id, size_t arity,
Operator::Properties properties = Operator::kNoProperties);
const Operator* CallRuntime(
const Runtime::Function* function, size_t arity,
Operator::Properties properties = Operator::kNoProperties);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
#if V8_ENABLE_WEBASSEMBLY
const Operator* CallWasm(const wasm::WasmModule* wasm_module,


@ -9,7 +9,6 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
@ -842,7 +841,7 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
// then ObjectIsUndetectable(left)
// else ReferenceEqual(left, right)
#define __ gasm.
JSGraphAssembler gasm(jsgraph(), jsgraph()->zone(), BranchSemantics::kJS);
JSGraphAssembler gasm(jsgraph(), jsgraph()->zone());
gasm.InitializeEffectControl(r.effect(), r.control());
auto lhs = TNode<Object>::UncheckedCast(r.left());


@ -268,7 +268,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kAbort:
case Runtime::kAllocateInOldGeneration:
case Runtime::kCreateIterResultObject:
case Runtime::kGrowableSharedArrayBufferByteLength:
case Runtime::kIncBlockCounter:
case Runtime::kIsFunction:
case Runtime::kNewClosure:


@ -6,7 +6,6 @@
#include "src/base/logging.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
@ -184,7 +183,7 @@ MemoryOptimizer::MemoryOptimizer(
JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: graph_assembler_(jsgraph, zone, BranchSemantics::kMachine),
: graph_assembler_(jsgraph, zone),
memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),


@ -406,6 +406,7 @@ Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
DCHECK_LE(inline_capacity, kMaxInlineCapacity);
}
void Node::AppendUse(Use* use) {
DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
DCHECK_EQ(this, *use->input_ptr());


@ -695,13 +695,6 @@ Node::Uses::const_iterator Node::Uses::begin() const {
Node::Uses::const_iterator Node::Uses::end() const { return const_iterator(); }
inline Node::Uses::const_iterator begin(const Node::Uses& uses) {
return uses.begin();
}
inline Node::Uses::const_iterator end(const Node::Uses& uses) {
return uses.end();
}
} // namespace compiler
} // namespace internal
} // namespace v8


@ -33,26 +33,20 @@
V(Throw) \
V(End)
#define MACHINE_LEVEL_CONSTANT_OP_LIST(V) \
V(Int32Constant) \
V(Int64Constant) \
V(TaggedIndexConstant) \
V(Float32Constant) \
V(Float64Constant) \
V(CompressedHeapConstant) \
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
#define JS_LEVEL_CONSTANT_OP_LIST(V) \
V(ExternalConstant) \
V(NumberConstant) \
V(PointerConstant) \
V(HeapConstant)
// Opcodes for constant operators.
#define CONSTANT_OP_LIST(V) \
JS_LEVEL_CONSTANT_OP_LIST(V) \
MACHINE_LEVEL_CONSTANT_OP_LIST(V)
#define CONSTANT_OP_LIST(V) \
V(Int32Constant) \
V(Int64Constant) \
V(TaggedIndexConstant) \
V(Float32Constant) \
V(Float64Constant) \
V(ExternalConstant) \
V(NumberConstant) \
V(PointerConstant) \
V(HeapConstant) \
V(CompressedHeapConstant) \
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
#define INNER_OP_LIST(V) \
V(Select) \
@ -80,9 +74,7 @@
V(Retain) \
V(MapGuard) \
V(FoldConstant) \
V(TypeGuard) \
V(EnterMachineGraph) \
V(ExitMachineGraph)
V(TypeGuard)
#define COMMON_OP_LIST(V) \
CONSTANT_OP_LIST(V) \
@ -555,13 +547,6 @@
SIMPLIFIED_OTHER_OP_LIST(V)
// Opcodes for Machine-level operators.
#define MACHINE_UNOP_32_LIST(V) \
V(Word32Clz) \
V(Word32Ctz) \
V(Int32AbsWithOverflow) \
V(Word32ReverseBits) \
V(Word32ReverseBytes)
#define MACHINE_COMPARE_BINOP_LIST(V) \
V(Word32Equal) \
V(Word64Equal) \
@ -580,6 +565,13 @@
V(Float64LessThan) \
V(Float64LessThanOrEqual)
#define MACHINE_UNOP_32_LIST(V) \
V(Word32Clz) \
V(Word32Ctz) \
V(Int32AbsWithOverflow) \
V(Word32ReverseBits) \
V(Word32ReverseBytes)
#define MACHINE_BINOP_32_LIST(V) \
V(Word32And) \
V(Word32Or) \
@ -1094,24 +1086,6 @@ class V8_EXPORT_PRIVATE IrOpcode {
return kJSEqual <= value && value <= kJSDebugger;
}
// Returns true if opcode for machine operator.
static bool IsMachineOpcode(Value value) {
return kWord32Clz <= value && value <= kTraceInstruction;
}
// Returns true iff opcode is a machine-level constant.
static bool IsMachineConstantOpcode(Value value) {
switch (value) {
#define CASE(name) \
case k##name: \
return true;
MACHINE_LEVEL_CONSTANT_OP_LIST(CASE)
#undef CASE
default:
return false;
}
}
// Returns true if opcode for constant operator.
static bool IsConstantOpcode(Value value) {
#define CASE(Name) \


@ -1130,11 +1130,6 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
#define MACHINE_BINOP(Name) \
Type OperationTyper::Name(Type, Type) { return Type::Machine(); }
TYPER_SUPPORTED_MACHINE_BINOP_LIST(MACHINE_BINOP)
#undef MACHINE_BINOP
Type OperationTyper::BigIntAdd(Type lhs, Type rhs) {
DCHECK(lhs.Is(Type::BigInt()));
DCHECK(rhs.Is(Type::BigInt()));


@ -9,26 +9,6 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/types.h"
#define TYPER_SUPPORTED_MACHINE_BINOP_LIST(V) \
V(Int32Add) \
V(Int64Add) \
V(Int32Sub) \
V(Int64Sub) \
V(Load) \
V(Uint32Div) \
V(Uint64Div) \
V(Uint32LessThan) \
V(Uint32LessThanOrEqual) \
V(Uint64LessThanOrEqual) \
V(Word32And) \
V(Word32Equal) \
V(Word32Or) \
V(Word32Shl) \
V(Word32Shr) \
V(Word64And) \
V(Word64Shl) \
V(Word64Shr)
namespace v8 {
namespace internal {
@ -74,7 +54,6 @@ class V8_EXPORT_PRIVATE OperationTyper {
SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
TYPER_SUPPORTED_MACHINE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
// Comparison operators.


@ -1986,8 +1986,7 @@ struct LateOptimizationPhase {
CommonOperatorReducer common_reducer(
&graph_reducer, data->graph(), data->broker(), data->common(),
data->machine(), temp_zone, BranchSemantics::kMachine);
JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone,
BranchSemantics::kMachine);
JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone);
SelectLowering select_lowering(&graph_assembler, data->graph());
AddReducer(data, &graph_reducer, &escape_analysis);
AddReducer(data, &graph_reducer, &branch_condition_elimination);


@ -160,9 +160,6 @@ RepresentationChanger::RepresentationChanger(
Node* RepresentationChanger::GetRepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type,
Node* use_node, UseInfo use_info) {
// We are currently not inserting conversions in machine graphs.
// We might add that, though.
DCHECK_IMPLIES(!output_type.IsNone(), !output_type.Is(Type::Machine()));
if (output_rep == MachineRepresentation::kNone && !output_type.IsNone()) {
// The output representation should be set if the type is inhabited (i.e.,
// if the value is possible).
@ -490,25 +487,28 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
}
} else if (CanBeTaggedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
return node;
}
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
} else if (IsAnyTagged(output_rep)) {
if (use_info.type_check() == TypeCheckKind::kBigInt) {
if (output_type.Is(Type::BigInt())) {
DCHECK_NE(output_rep, MachineRepresentation::kTaggedSigned);
return node;
}
op = simplified()->CheckBigInt(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kBigInt64) {
if (output_type.Is(Type::SignedBigInt64())) {
DCHECK_NE(output_rep, MachineRepresentation::kTaggedSigned);
return node;
}
op = simplified()->CheckBigInt64(use_info.feedback());
} else if (output_rep == MachineRepresentation::kTaggedPointer ||
!output_type.Maybe(Type::SignedSmall())) {
DCHECK_NE(output_rep, MachineRepresentation::kTaggedSigned);
return node;
} else {
op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
}
} else {
return TypeError(node, output_rep, output_type,
@ -1039,11 +1039,9 @@ Node* RepresentationChanger::GetBitRepresentationFor(
case IrOpcode::kHeapConstant: {
HeapObjectMatcher m(node);
if (m.Is(factory()->false_value())) {
return InsertTypeOverrideForVerifier(Type::Boolean(),
jsgraph()->Int32Constant(0));
return jsgraph()->Int32Constant(0);
} else if (m.Is(factory()->true_value())) {
return InsertTypeOverrideForVerifier(Type::Boolean(),
jsgraph()->Int32Constant(1));
return jsgraph()->Int32Constant(1);
}
break;
}


@ -4,8 +4,6 @@
#include "src/compiler/simplified-lowering-verifier.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/operation-typer.h"
#include "src/compiler/type-cache.h"
@ -24,8 +22,7 @@ Truncation LeastGeneralTruncation(const Truncation& t1, const Truncation& t2,
return LeastGeneralTruncation(LeastGeneralTruncation(t1, t2), t3);
}
bool IsNonTruncatingMachineTypeFor(const MachineType& mt, const Type& type,
Zone* graph_zone) {
bool IsNonTruncatingMachineTypeFor(const MachineType& mt, const Type& type) {
if (type.IsNone()) return true;
// TODO(nicohartmann@): Add more cases here.
if (type.Is(Type::BigInt())) {
@ -40,7 +37,7 @@ bool IsNonTruncatingMachineTypeFor(const MachineType& mt, const Type& type,
case MachineRepresentation::kBit:
CHECK(mt.semantic() == MachineSemantic::kBool ||
mt.semantic() == MachineSemantic::kAny);
return type.Is(Type::Boolean()) || type.Is(Type::Range(0, 1, graph_zone));
return type.Is(Type::Boolean());
default:
return true;
}
@ -78,19 +75,6 @@ void SimplifiedLoweringVerifier::CheckAndSet(Node* node, const Type& type,
SetTruncation(node, GeneralizeTruncation(trunc, type));
}
void SimplifiedLoweringVerifier::ReportInvalidTypeCombination(
Node* node, const std::vector<Type>& types) {
std::ostringstream types_str;
for (size_t i = 0; i < types.size(); ++i) {
if (i != 0) types_str << ", ";
types[i].PrintTo(types_str);
}
FATAL(
"SimplifiedLoweringVerifierError: invalid combination of input types %s "
" for node #%d:%s",
types_str.str().c_str(), node->id(), node->op()->mnemonic());
}
bool IsModuloTruncation(const Truncation& truncation) {
return truncation.IsUsedAsWord32() || truncation.IsUsedAsWord64() ||
Truncation::Any().IsLessGeneralThan(truncation);
@ -150,21 +134,7 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
case IrOpcode::kFrameState:
case IrOpcode::kJSStackCheck:
break;
case IrOpcode::kInt32Constant: {
// NOTE: Constants require special handling because they are cached in
// the graph and shared between machine graphs and the non-machine
// graphs lowered during SL; a machine graph may have assigned
// Type::Machine() to such a constant. To still provide a different
// type to uses that don't come from machine graphs, the machine uses
// of Int32Constants are routed through additional SLVerifierHints
// that supply the required Type::Machine(), so the constant itself
// can be treated here as having a JS type for its non-machine uses.
int32_t value = OpParameter<int32_t>(node->op());
Type type = Type::Constant(value, graph_zone());
SetType(node, type);
SetTruncation(node, GeneralizeTruncation(Truncation::Word32(), type));
break;
}
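// Illustrative sketch (not part of this diff) of the pattern the NOTE above
// describes, in pseudo-graph notation:
//
//   c    = Int32Constant[7]                    // cached, shared in the graph
//   hint = SLVerifierHint[Type::Machine()](c)  // inserted for machine uses
//   machine_use(hint)                          // observes Type::Machine()
//   js_use(c)                                  // observes Type::Constant(7)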
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kFloat64Constant: {
// Constants might be untyped, because they are cached in the graph and
@ -213,22 +183,8 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
break;
}
case IrOpcode::kInt32Add: {
Type left_type = InputType(node, 0);
Type right_type = InputType(node, 1);
Type output_type;
if (left_type.IsNone() && right_type.IsNone()) {
output_type = Type::None();
} else if (left_type.Is(Type::Machine()) &&
right_type.Is(Type::Machine())) {
output_type = Type::Machine();
} else if (left_type.Is(Type::NumberOrOddball()) &&
right_type.Is(Type::NumberOrOddball())) {
left_type = op_typer.ToNumber(left_type);
right_type = op_typer.ToNumber(right_type);
output_type = op_typer.NumberAdd(left_type, right_type);
} else {
ReportInvalidTypeCombination(node, {left_type, right_type});
}
Type output_type =
op_typer.NumberAdd(InputType(node, 0), InputType(node, 1));
Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
InputTruncation(node, 1),
Truncation::Word32());
@ -237,22 +193,8 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
break;
}
case IrOpcode::kInt32Sub: {
Type left_type = InputType(node, 0);
Type right_type = InputType(node, 1);
Type output_type;
if (left_type.IsNone() && right_type.IsNone()) {
output_type = Type::None();
} else if (left_type.Is(Type::Machine()) &&
right_type.Is(Type::Machine())) {
output_type = Type::Machine();
} else if (left_type.Is(Type::NumberOrOddball()) &&
right_type.Is(Type::NumberOrOddball())) {
left_type = op_typer.ToNumber(left_type);
right_type = op_typer.ToNumber(right_type);
output_type = op_typer.NumberSubtract(left_type, right_type);
} else {
ReportInvalidTypeCombination(node, {left_type, right_type});
}
Type output_type =
op_typer.NumberSubtract(InputType(node, 0), InputType(node, 1));
Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
InputTruncation(node, 1),
Truncation::Word32());
@ -280,16 +222,9 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
case IrOpcode::kInt64Add: {
Type left_type = InputType(node, 0);
Type right_type = InputType(node, 1);
Type output_type;
if (left_type.IsNone() && right_type.IsNone()) {
// None x None -> None
output_type = Type::None();
} else if (left_type.Is(Type::Machine()) &&
right_type.Is(Type::Machine())) {
// Machine x Machine -> Machine
output_type = Type::Machine();
} else if (left_type.Is(Type::BigInt()) &&
right_type.Is(Type::BigInt())) {
if (left_type.Is(Type::BigInt()) && right_type.Is(Type::BigInt())) {
// BigInt x BigInt -> BigInt
output_type = op_typer.BigIntAdd(left_type, right_type);
} else if (left_type.Is(Type::Number()) &&
@ -298,38 +233,17 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
output_type = op_typer.NumberAdd(left_type, right_type);
} else {
// Invalid type combination.
ReportInvalidTypeCombination(node, {left_type, right_type});
}
Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
InputTruncation(node, 1),
Truncation::Word64());
CHECK(IsModuloTruncation(output_trunc));
CheckAndSet(node, output_type, output_trunc);
break;
}
case IrOpcode::kInt64Sub: {
Type left_type = InputType(node, 0);
Type right_type = InputType(node, 1);
Type output_type;
if (left_type.IsNone() && right_type.IsNone()) {
// None x None -> None
output_type = Type::None();
} else if (left_type.Is(Type::Machine()) &&
right_type.Is(Type::Machine())) {
// Machine x Machine -> Machine
output_type = Type::Machine();
} else if (left_type.Is(Type::BigInt()) &&
right_type.Is(Type::BigInt())) {
// BigInt x BigInt -> BigInt
output_type = op_typer.BigIntSubtract(left_type, right_type);
} else if (left_type.Is(Type::Number()) &&
right_type.Is(Type::Number())) {
// Number x Number -> Number
output_type = op_typer.NumberSubtract(left_type, right_type);
} else {
// Invalid type combination.
ReportInvalidTypeCombination(node, {left_type, right_type});
std::ostringstream left_str, right_str;
left_type.PrintTo(left_str);
right_type.PrintTo(right_str);
FATAL(
"SimplifiedLoweringVerifierError: invalid combination of input "
"types "
"%s and %s for node #%d:%s",
left_str.str().c_str(), right_str.str().c_str(), node->id(),
node->op()->mnemonic());
}
Truncation output_trunc = LeastGeneralTruncation(InputTruncation(node, 0),
InputTruncation(node, 1),
Truncation::Word64());
@ -412,10 +326,8 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
break;
}
case IrOpcode::kBranch: {
CHECK_EQ(BranchParametersOf(node->op()).semantics(),
BranchSemantics::kMachine);
Type input_type = InputType(node, 0);
CHECK(input_type.Is(Type::Boolean()) || input_type.Is(Type::Machine()));
CHECK(InputType(node, 0).Is(Type::Boolean()));
CHECK_EQ(InputTruncation(node, 0), Truncation::Any());
break;
}
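// Illustrative sketch (not part of this diff): with the reverted parameter a
// branch carried its semantics explicitly, e.g. built (hypothetically) as
//
//   graph()->NewNode(common()->Branch(BranchHint::kNone,
//                                     BranchSemantics::kMachine),
//                    condition, control);
//
// so the check above could accept a machine-level condition
// (Type::Machine()) in addition to a JS Boolean.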
case IrOpcode::kTypedStateValues: {
@ -424,7 +336,7 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
// Inputs must not be truncated.
CHECK_EQ(InputTruncation(node, i), Truncation::Any());
CHECK(IsNonTruncatingMachineTypeFor(machine_types->at(i),
InputType(node, i), graph_zone()));
InputType(node, i)));
}
break;
}
@ -433,11 +345,6 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
SetTruncation(node, Truncation::Any());
break;
}
case IrOpcode::kEnterMachineGraph:
case IrOpcode::kExitMachineGraph: {
// Eliminated during lowering.
UNREACHABLE();
}
#define CASE(code, ...) case IrOpcode::k##code:
// Control operators
@ -581,6 +488,7 @@ void SimplifiedLoweringVerifier::VisitNode(Node* node,
CASE(Word64RolLowerable)
CASE(Word64RorLowerable)
CASE(Int64AddWithOverflow)
CASE(Int64Sub)
CASE(Int64SubWithOverflow)
CASE(Int64Mul)
CASE(Int64MulHigh)


@ -5,8 +5,6 @@
#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_VERIFIER_H_
#define V8_COMPILER_SIMPLIFIED_LOWERING_VERIFIER_H_
#include "src/base/container-utils.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/representation-change.h"
namespace v8 {
@ -23,11 +21,7 @@ class SimplifiedLoweringVerifier final {
};
SimplifiedLoweringVerifier(Zone* zone, Graph* graph)
: hints_(zone),
machine_uses_of_constants_(zone),
data_(zone),
graph_(graph),
zone_(zone) {}
: hints_(zone), data_(zone), graph_(graph) {}
void VisitNode(Node* node, OperationTyper& op_typer);
@ -36,33 +30,10 @@ class SimplifiedLoweringVerifier final {
hints_.push_back(node);
}
const ZoneVector<Node*>& inserted_hints() const { return hints_; }
void RecordMachineUsesOfConstant(Node* constant, Node::Uses uses) {
DCHECK(IrOpcode::IsMachineConstantOpcode(constant->opcode()));
auto it = machine_uses_of_constants_.find(constant);
if (it == machine_uses_of_constants_.end()) {
it =
machine_uses_of_constants_.emplace(constant, ZoneVector<Node*>(zone_))
.first;
}
base::vector_append(it->second, uses);
}
const ZoneUnorderedMap<Node*, ZoneVector<Node*>>& machine_uses_of_constants()
const {
return machine_uses_of_constants_;
}
base::Optional<Type> GetType(Node* node) const {
if (NodeProperties::IsTyped(node)) {
Type type = NodeProperties::GetType(node);
// We do not use the static type for constants, even if we have one,
// because those are cached in the graph and shared between machine
// and non-machine subgraphs. The former might have assigned
// Type::Machine() to them.
if (IrOpcode::IsMachineConstantOpcode(node->opcode())) {
DCHECK(type.Is(Type::Machine()));
} else {
return type;
}
return NodeProperties::GetType(node);
}
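// Illustrative sketch (hypothetical nodes, not taken from this file): a
// cached constant like Int32Constant(0) can be reached both from an early
// machine subgraph (say, as a Word32Equal input) and from the regular JS
// graph, so no single static type fits every use; machine constant opcodes
// therefore fall back to the verifier's own per-node type data.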
// For nodes that have not been typed before SL, we use the type that has
// been inferred by the verifier.
@ -89,7 +60,16 @@ class SimplifiedLoweringVerifier final {
Type InputType(Node* node, int input_index) const {
// TODO(nicohartmann): Check that inputs are typed, once all operators are
// supported.
auto type_opt = GetType(node->InputAt(input_index));
Node* input = node->InputAt(input_index);
if (NodeProperties::IsTyped(input)) {
return NodeProperties::GetType(input);
}
// For nodes that have not been typed before SL, we use the type that has
// been inferred by the verifier.
base::Optional<Type> type_opt;
if (input->id() < data_.size()) {
type_opt = data_[input->id()].type;
}
return type_opt.has_value() ? *type_opt : Type::None();
}
@ -111,7 +91,6 @@ class SimplifiedLoweringVerifier final {
void CheckType(Node* node, const Type& type);
void CheckAndSet(Node* node, const Type& type, const Truncation& trunc);
void ReportInvalidTypeCombination(Node* node, const std::vector<Type>& types);
// Generalize to a less strict truncation in the context of a given type. For
// example, a Truncation::kWord32[kIdentifyZeros] does not have any effect on
@ -125,10 +104,8 @@ class SimplifiedLoweringVerifier final {
Zone* graph_zone() const { return graph_->zone(); }
ZoneVector<Node*> hints_;
ZoneUnorderedMap<Node*, ZoneVector<Node*>> machine_uses_of_constants_;
ZoneVector<PerNodeData> data_;
Graph* graph_;
Zone* zone_;
};
} // namespace compiler


@ -396,11 +396,6 @@ class RepresentationSelector {
bool UpdateFeedbackType(Node* node) {
if (node->op()->ValueOutputCount() == 0) return false;
if ((IrOpcode::IsMachineOpcode(node->opcode()) ||
IrOpcode::IsMachineConstantOpcode(node->opcode())) &&
node->opcode() != IrOpcode::kLoadFramePointer) {
DCHECK(NodeProperties::GetType(node).Is(Type::Machine()));
}
// For any non-phi node just wait until we get all inputs typed. We only
// allow untyped inputs for phi nodes because phis are the only places
@ -600,6 +595,7 @@ class RepresentationSelector {
while (!stack.empty()) {
NodeState& current = stack.top();
Node* node = current.node;
// If there is an unvisited input, push it and continue with that node.
bool pushed_unvisited = false;
while (current.input_index < node->InputCount()) {
@ -754,19 +750,6 @@ class RepresentationSelector {
TRACE("--{Verify Phase}--\n");
// Patch pending type overrides.
for (auto [constant, uses] : verifier_->machine_uses_of_constants()) {
Node* typed_constant =
InsertTypeOverrideForVerifier(Type::Machine(), constant);
for (auto use : uses) {
for (int i = 0; i < use->InputCount(); ++i) {
if (use->InputAt(i) == constant) {
use->ReplaceInput(i, typed_constant);
}
}
}
}
// Generate a new traversal containing all the new nodes created during
// lowering.
GenerateTraversal();
@ -791,9 +774,7 @@ class RepresentationSelector {
}
// Verify all nodes.
for (Node* node : traversal_nodes_) {
verifier_->VisitNode(node, op_typer_);
}
for (Node* node : traversal_nodes_) verifier_->VisitNode(node, op_typer_);
// Print graph.
if (info != nullptr && info->trace_turbo_json()) {
@ -1084,7 +1065,7 @@ class RepresentationSelector {
void VisitNoop(Node* node, Truncation truncation) {
if (truncation.IsUnused()) return VisitUnused<T>(node);
MachineRepresentation representation =
GetOutputInfoForPhi(TypeOf(node), truncation);
GetOutputInfoForPhi(node, TypeOf(node), truncation);
VisitUnop<T>(node, UseInfo(representation, truncation), representation);
if (lower<T>()) DeferReplacement(node, node->InputAt(0));
}
@ -1160,7 +1141,11 @@ class RepresentationSelector {
}
// Infer representation for phi-like nodes.
MachineRepresentation GetOutputInfoForPhi(Type type, Truncation use) {
// The {node} parameter is only used to decide on the int64 representation.
// Once the type system supports an external pointer type, the {node}
// parameter can be removed.
MachineRepresentation GetOutputInfoForPhi(Node* node, Type type,
Truncation use) {
// Compute the representation.
if (type.Is(Type::None())) {
return MachineRepresentation::kNone;
@ -1198,7 +1183,7 @@ class RepresentationSelector {
ProcessInput<T>(node, 0, UseInfo::Bool());
MachineRepresentation output =
GetOutputInfoForPhi(TypeOf(node), truncation);
GetOutputInfoForPhi(node, TypeOf(node), truncation);
SetOutput<T>(node, output);
if (lower<T>()) {
@ -1219,13 +1204,8 @@ class RepresentationSelector {
template <Phase T>
void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
// If we already have a non-tagged representation set in the Phi node, it
// does come from subgraphs using machine operators we introduced early in
// the pipeline. In this case, we just keep the representation.
MachineRepresentation output = PhiRepresentationOf(node->op());
if (output == MachineRepresentation::kTagged) {
output = GetOutputInfoForPhi(TypeOf(node), truncation);
}
MachineRepresentation output =
GetOutputInfoForPhi(node, TypeOf(node), truncation);
// Only set the output representation if not running with type
// feedback. (Feedback typing will set the representation.)
SetOutput<T>(node, output);
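// Hypothetical illustration of the machine-subgraph case handled above: a
// Phi created early with a concrete representation such as
// MachineRepresentation::kWord64 keeps that representation, while an
// ordinary JS Phi is created as kTagged and has its representation inferred
// here from type and truncation.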
@ -1252,16 +1232,12 @@ class RepresentationSelector {
if (input_type.Is(type)) {
VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(
node, InsertTypeOverrideForVerifier(
Type::Boolean(), lowering->jsgraph()->Int32Constant(1)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower<T>() && !input_type.Maybe(type)) {
DeferReplacement(
node, InsertTypeOverrideForVerifier(
Type::Boolean(), lowering->jsgraph()->Int32Constant(0)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
}
}
@ -2164,18 +2140,7 @@ class RepresentationSelector {
->GetParameterType(ParameterIndexOf(node->op()))
.representation());
case IrOpcode::kInt32Constant:
DCHECK_EQ(0, node->InputCount());
SetOutput<T>(node, MachineRepresentation::kWord32);
DCHECK(NodeProperties::GetType(node).Is(Type::Machine()));
if (verification_enabled()) {
// During lowering, SimplifiedLowering generates Int32Constants which
// need to be treated differently by the verifier than the
// Int32Constants introduced explicitly in machine graphs. To be able
// to distinguish them, we record those that are being visited here
// because they were generated before SimplifiedLowering.
verifier_->RecordMachineUsesOfConstant(node, node->uses());
}
return;
return VisitLeaf<T>(node, MachineRepresentation::kWord32);
case IrOpcode::kInt64Constant:
return VisitLeaf<T>(node, MachineRepresentation::kWord64);
case IrOpcode::kExternalConstant:
@ -2209,19 +2174,8 @@ class RepresentationSelector {
}
case IrOpcode::kBranch: {
const auto& p = BranchParametersOf(node->op());
if (p.semantics() == BranchSemantics::kMachine) {
// If this is a machine branch, the condition is a machine operator,
// so we enter machine branch here.
ProcessInput<T>(node, 0, UseInfo::Any());
} else {
DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
ProcessInput<T>(node, 0, UseInfo::Bool());
if (lower<T>()) {
ChangeOp(node,
common()->Branch(p.hint(), BranchSemantics::kMachine));
}
}
DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
ProcessInput<T>(node, 0, UseInfo::Bool());
EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node));
return;
}
@ -3801,7 +3755,7 @@ class RepresentationSelector {
if (InputIs(node, Type::Boolean())) {
VisitUnop<T>(node, UseInfo::Bool(), MachineRepresentation::kWord32);
if (lower<T>()) {
DeferReplacement(node, node->InputAt(0));
ChangeToSemanticsHintForVerifier(node, node->op());
}
} else if (InputIs(node, Type::String())) {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@ -3814,7 +3768,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower<T>()) {
DeferReplacement(node, node->InputAt(0));
ChangeToSemanticsHintForVerifier(node, node->op());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@ -3828,7 +3782,7 @@ class RepresentationSelector {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower<T>()) {
DeferReplacement(node, node->InputAt(0));
ChangeToSemanticsHintForVerifier(node, node->op());
}
} else {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@ -3892,16 +3846,12 @@ class RepresentationSelector {
if (input_type.Is(type_cache_->kSafeInteger)) {
VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(1)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::Number())) {
VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(0)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
@ -3924,16 +3874,12 @@ class RepresentationSelector {
if (input_type.Is(type_cache_->kSafeInteger)) {
VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(1)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::Number())) {
VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(0)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
@ -3954,16 +3900,12 @@ class RepresentationSelector {
if (input_type.Is(type_cache_->kSafeInteger)) {
VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(1)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::Number())) {
VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(0)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
@ -3986,16 +3928,12 @@ class RepresentationSelector {
if (input_type.Is(Type::MinusZero())) {
VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(1)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::MinusZero())) {
VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(0)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
@ -4013,16 +3951,12 @@ class RepresentationSelector {
if (input_type.Is(Type::NaN())) {
VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(1)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
}
} else if (!input_type.Maybe(Type::NaN())) {
VisitUnop<T>(node, UseInfo::Any(), MachineRepresentation::kBit);
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Boolean(),
lowering->jsgraph()->Int32Constant(0)));
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else if (input_type.Is(Type::Number())) {
VisitUnop<T>(node, UseInfo::TruncatingFloat64(),
@ -4207,7 +4141,7 @@ class RepresentationSelector {
// for the sigma's type.
Type type = TypeOf(node);
MachineRepresentation representation =
GetOutputInfoForPhi(type, truncation);
GetOutputInfoForPhi(node, type, truncation);
// Here we pretend that the input has the sigma's type for the
// conversion.
@ -4340,65 +4274,6 @@ class RepresentationSelector {
}
return;
}
case IrOpcode::kDebugBreak:
return;
// Nodes from machine graphs.
case IrOpcode::kEnterMachineGraph: {
DCHECK_EQ(1, node->op()->ValueInputCount());
UseInfo use_info = OpParameter<UseInfo>(node->op());
ProcessInput<T>(node, 0, use_info);
SetOutput<T>(node, use_info.representation());
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
Type::Machine(), node->InputAt(0)));
}
return;
}
case IrOpcode::kExitMachineGraph: {
DCHECK_EQ(1, node->op()->ValueInputCount());
ProcessInput<T>(node, 0, UseInfo::Any());
const auto& p = ExitMachineGraphParametersOf(node->op());
SetOutput<T>(node, p.output_representation(), p.output_type());
if (lower<T>()) {
DeferReplacement(node, InsertTypeOverrideForVerifier(
p.output_type(), node->InputAt(0)));
}
return;
}
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kUint64LessThanOrEqual:
case IrOpcode::kUint32Div:
case IrOpcode::kWord32And:
case IrOpcode::kWord32Equal:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
for (int i = 0; i < node->InputCount(); ++i) {
ProcessInput<T>(node, i, UseInfo::Any());
}
SetOutput<T>(node, MachineRepresentation::kWord32);
return;
case IrOpcode::kInt64Add:
case IrOpcode::kInt64Sub:
case IrOpcode::kUint64Div:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
for (int i = 0; i < node->InputCount(); ++i) {
ProcessInput<T>(node, i, UseInfo::Any());
}
SetOutput<T>(node, MachineRepresentation::kWord64);
return;
case IrOpcode::kLoad:
for (int i = 0; i < node->InputCount(); ++i) {
ProcessInput<T>(node, i, UseInfo::Any());
}
SetOutput<T>(node, LoadRepresentationOf(node->op()).representation());
return;
default:
FATAL(
@ -4444,6 +4319,18 @@ class RepresentationSelector {
return node;
}
void ChangeToSemanticsHintForVerifier(Node* node, const Operator* semantics) {
DCHECK_EQ(node->op()->ValueInputCount(), 1);
DCHECK_EQ(node->op()->EffectInputCount(), 0);
DCHECK_EQ(node->op()->ControlInputCount(), 0);
if (verification_enabled()) {
ChangeOp(node, common()->SLVerifierHint(semantics, base::nullopt));
verifier_->RecordHint(node);
} else {
DeferReplacement(node, node->InputAt(0));
}
}
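// Note on the helper above: with verification enabled, the node becomes an
// SLVerifierHint that acts as an identity on its single value input while
// carrying the original operator as a semantics hint for the verifier;
// otherwise it is simply replaced by that input.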
private:
void ChangeOp(Node* node, const Operator* new_op) {
compiler::NodeProperties::ChangeOp(node, new_op);
@ -4696,9 +4583,8 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
Node* control = node->InputAt(4);
Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
Node* branch0 = graph()->NewNode(
common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine), check0,
control);
Node* branch0 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* etrue0 = effect;
@ -4736,9 +4622,7 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
}
Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
Node* branch1 = graph()->NewNode(
common()->Branch(BranchHint::kNone, BranchSemantics::kMachine), check1,
if_false0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* etrue1 = efalse0;
@ -4801,9 +4685,8 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
Node* control = node->InputAt(4);
Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
Node* branch0 = graph()->NewNode(
common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine), check0,
control);
Node* branch0 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* etrue0 = effect;
@ -4838,9 +4721,7 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
}
Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
Node* branch1 = graph()->NewNode(
common()->Branch(BranchHint::kNone, BranchSemantics::kMachine), check1,
if_false0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* etrue1 = efalse0;
@ -4968,9 +4849,8 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(
common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine), check0,
graph()->start());
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
graph()->start());
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* true0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
@ -4979,9 +4859,7 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
Node* false0;
{
Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
Node* branch1 = graph()->NewNode(
common()->Branch(BranchHint::kNone, BranchSemantics::kMachine), check1,
if_false0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* true1 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true1);
@ -4990,9 +4868,7 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
Node* false1;
{
Node* check2 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
Node* branch2 = graph()->NewNode(
common()->Branch(BranchHint::kNone, BranchSemantics::kMachine),
check2, if_false1);
Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
Node* true2 = zero;
@ -5050,9 +4926,8 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(
common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine), check0,
graph()->start());
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
graph()->start());
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* true0;
@ -5060,9 +4935,7 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
Node* branch1 = graph()->NewNode(
common()->Branch(BranchHint::kNone, BranchSemantics::kMachine), check1,
if_true0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
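// (rhs & (rhs - 1)) != 0 exactly when rhs is not a power of two, so this
// branch needs the generic Int32Mod; in the power-of-two case below, a
// non-negative lhs can be reduced with the bit mask rhs - 1 instead.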
@ -5071,9 +4944,8 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
Node* false1;
{
Node* check2 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
Node* branch2 = graph()->NewNode(
common()->Branch(BranchHint::kFalse, BranchSemantics::kMachine),
check2, if_false1);
Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
check2, if_false1);
Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
Node* true2 = graph()->NewNode(
@ -5097,9 +4969,8 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
Node* false0;
{
Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
Node* branch1 = graph()->NewNode(
common()->Branch(BranchHint::kTrue, BranchSemantics::kMachine), check1,
if_false0);
Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
@ -5144,8 +5015,7 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
}
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
Diamond d(graph(), common(), check, BranchHint::kFalse,
BranchSemantics::kMachine);
Diamond d(graph(), common(), check, BranchHint::kFalse);
Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
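// The division runs only on the false path (rhs != 0); together with the phi
// below this computes rhs == 0 ? 0 : lhs / rhs without ever executing
// Uint32Div with a zero divisor.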
return d.Phi(MachineRepresentation::kWord32, zero, div);
}
@ -5182,9 +5052,8 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
Node* branch0 = graph()->NewNode(
common()->Branch(BranchHint::kFalse, BranchSemantics::kMachine), check0,
graph()->start());
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
graph()->start());
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* true0 = zero;
@ -5195,9 +5064,7 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
Node* branch1 = graph()->NewNode(
common()->Branch(BranchHint::kNone, BranchSemantics::kMachine), check1,
if_false0);
Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);


@ -88,7 +88,6 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_BINARY_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_BINARY_CASE)
TYPER_SUPPORTED_MACHINE_BINOP_LIST(DECLARE_BINARY_CASE)
#undef DECLARE_BINARY_CASE
#define DECLARE_OTHER_CASE(x, ...) \
case IrOpcode::k##x: \
@ -126,139 +125,7 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_CHECKED_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
IF_WASM(SIMPLIFIED_WASM_OP_LIST, DECLARE_IMPOSSIBLE_CASE)
MACHINE_SIMD_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
MACHINE_UNOP_32_LIST(DECLARE_IMPOSSIBLE_CASE)
DECLARE_IMPOSSIBLE_CASE(Word32Xor)
DECLARE_IMPOSSIBLE_CASE(Word32Sar)
DECLARE_IMPOSSIBLE_CASE(Word32Rol)
DECLARE_IMPOSSIBLE_CASE(Word32Ror)
DECLARE_IMPOSSIBLE_CASE(Int32AddWithOverflow)
DECLARE_IMPOSSIBLE_CASE(Int32SubWithOverflow)
DECLARE_IMPOSSIBLE_CASE(Int32Mul)
DECLARE_IMPOSSIBLE_CASE(Int32MulWithOverflow)
DECLARE_IMPOSSIBLE_CASE(Int32MulHigh)
DECLARE_IMPOSSIBLE_CASE(Int32Div)
DECLARE_IMPOSSIBLE_CASE(Int32Mod)
DECLARE_IMPOSSIBLE_CASE(Uint32Mod)
DECLARE_IMPOSSIBLE_CASE(Uint32MulHigh)
DECLARE_IMPOSSIBLE_CASE(Word64Or)
DECLARE_IMPOSSIBLE_CASE(Word64Xor)
DECLARE_IMPOSSIBLE_CASE(Word64Sar)
DECLARE_IMPOSSIBLE_CASE(Word64Rol)
DECLARE_IMPOSSIBLE_CASE(Word64Ror)
DECLARE_IMPOSSIBLE_CASE(Word64RolLowerable)
DECLARE_IMPOSSIBLE_CASE(Word64RorLowerable)
DECLARE_IMPOSSIBLE_CASE(Int64AddWithOverflow)
DECLARE_IMPOSSIBLE_CASE(Int64SubWithOverflow)
DECLARE_IMPOSSIBLE_CASE(Int64Mul)
DECLARE_IMPOSSIBLE_CASE(Int64MulHigh)
DECLARE_IMPOSSIBLE_CASE(Int64MulWithOverflow)
DECLARE_IMPOSSIBLE_CASE(Int64Div)
DECLARE_IMPOSSIBLE_CASE(Int64Mod)
DECLARE_IMPOSSIBLE_CASE(Uint64Mod)
DECLARE_IMPOSSIBLE_CASE(Uint64MulHigh)
DECLARE_IMPOSSIBLE_CASE(Word64Equal)
DECLARE_IMPOSSIBLE_CASE(Int32LessThan)
DECLARE_IMPOSSIBLE_CASE(Int32LessThanOrEqual)
DECLARE_IMPOSSIBLE_CASE(Int64LessThan)
DECLARE_IMPOSSIBLE_CASE(Int64LessThanOrEqual)
DECLARE_IMPOSSIBLE_CASE(Uint64LessThan)
DECLARE_IMPOSSIBLE_CASE(Float32Equal)
DECLARE_IMPOSSIBLE_CASE(Float32LessThan)
DECLARE_IMPOSSIBLE_CASE(Float32LessThanOrEqual)
DECLARE_IMPOSSIBLE_CASE(Float64Equal)
DECLARE_IMPOSSIBLE_CASE(Float64LessThan)
DECLARE_IMPOSSIBLE_CASE(Float64LessThanOrEqual)
MACHINE_FLOAT32_BINOP_LIST(DECLARE_IMPOSSIBLE_CASE)
MACHINE_FLOAT32_UNOP_LIST(DECLARE_IMPOSSIBLE_CASE)
MACHINE_FLOAT64_BINOP_LIST(DECLARE_IMPOSSIBLE_CASE)
MACHINE_FLOAT64_UNOP_LIST(DECLARE_IMPOSSIBLE_CASE)
MACHINE_ATOMIC_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
DECLARE_IMPOSSIBLE_CASE(AbortCSADcheck)
DECLARE_IMPOSSIBLE_CASE(DebugBreak)
DECLARE_IMPOSSIBLE_CASE(Comment)
DECLARE_IMPOSSIBLE_CASE(LoadImmutable)
DECLARE_IMPOSSIBLE_CASE(Store)
DECLARE_IMPOSSIBLE_CASE(StackSlot)
DECLARE_IMPOSSIBLE_CASE(Word32Popcnt)
DECLARE_IMPOSSIBLE_CASE(Word64Popcnt)
DECLARE_IMPOSSIBLE_CASE(Word64Clz)
DECLARE_IMPOSSIBLE_CASE(Word64Ctz)
DECLARE_IMPOSSIBLE_CASE(Word64ClzLowerable)
DECLARE_IMPOSSIBLE_CASE(Word64CtzLowerable)
DECLARE_IMPOSSIBLE_CASE(Word64ReverseBits)
DECLARE_IMPOSSIBLE_CASE(Word64ReverseBytes)
DECLARE_IMPOSSIBLE_CASE(Simd128ReverseBytes)
DECLARE_IMPOSSIBLE_CASE(Int64AbsWithOverflow)
DECLARE_IMPOSSIBLE_CASE(BitcastTaggedToWord)
DECLARE_IMPOSSIBLE_CASE(BitcastTaggedToWordForTagAndSmiBits)
DECLARE_IMPOSSIBLE_CASE(BitcastWordToTagged)
DECLARE_IMPOSSIBLE_CASE(BitcastWordToTaggedSigned)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat64ToWord32)
DECLARE_IMPOSSIBLE_CASE(ChangeFloat32ToFloat64)
DECLARE_IMPOSSIBLE_CASE(ChangeFloat64ToInt32)
DECLARE_IMPOSSIBLE_CASE(ChangeFloat64ToInt64)
DECLARE_IMPOSSIBLE_CASE(ChangeFloat64ToUint32)
DECLARE_IMPOSSIBLE_CASE(ChangeFloat64ToUint64)
DECLARE_IMPOSSIBLE_CASE(Float64SilenceNaN)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat64ToInt64)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat64ToUint32)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat32ToInt32)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat32ToUint32)
DECLARE_IMPOSSIBLE_CASE(TryTruncateFloat32ToInt64)
DECLARE_IMPOSSIBLE_CASE(TryTruncateFloat64ToInt64)
DECLARE_IMPOSSIBLE_CASE(TryTruncateFloat32ToUint64)
DECLARE_IMPOSSIBLE_CASE(TryTruncateFloat64ToUint64)
DECLARE_IMPOSSIBLE_CASE(TryTruncateFloat64ToInt32)
DECLARE_IMPOSSIBLE_CASE(TryTruncateFloat64ToUint32)
DECLARE_IMPOSSIBLE_CASE(ChangeInt32ToFloat64)
DECLARE_IMPOSSIBLE_CASE(BitcastWord32ToWord64)
DECLARE_IMPOSSIBLE_CASE(ChangeInt32ToInt64)
DECLARE_IMPOSSIBLE_CASE(ChangeInt64ToFloat64)
DECLARE_IMPOSSIBLE_CASE(ChangeUint32ToFloat64)
DECLARE_IMPOSSIBLE_CASE(ChangeUint32ToUint64)
DECLARE_IMPOSSIBLE_CASE(TruncateFloat64ToFloat32)
DECLARE_IMPOSSIBLE_CASE(TruncateInt64ToInt32)
DECLARE_IMPOSSIBLE_CASE(RoundFloat64ToInt32)
DECLARE_IMPOSSIBLE_CASE(RoundInt32ToFloat32)
DECLARE_IMPOSSIBLE_CASE(RoundInt64ToFloat32)
DECLARE_IMPOSSIBLE_CASE(RoundInt64ToFloat64)
DECLARE_IMPOSSIBLE_CASE(RoundUint32ToFloat32)
DECLARE_IMPOSSIBLE_CASE(RoundUint64ToFloat32)
DECLARE_IMPOSSIBLE_CASE(RoundUint64ToFloat64)
DECLARE_IMPOSSIBLE_CASE(BitcastFloat32ToInt32)
DECLARE_IMPOSSIBLE_CASE(BitcastFloat64ToInt64)
DECLARE_IMPOSSIBLE_CASE(BitcastInt32ToFloat32)
DECLARE_IMPOSSIBLE_CASE(BitcastInt64ToFloat64)
DECLARE_IMPOSSIBLE_CASE(Float64ExtractLowWord32)
DECLARE_IMPOSSIBLE_CASE(Float64ExtractHighWord32)
DECLARE_IMPOSSIBLE_CASE(Float64InsertLowWord32)
DECLARE_IMPOSSIBLE_CASE(Float64InsertHighWord32)
DECLARE_IMPOSSIBLE_CASE(Word32Select)
DECLARE_IMPOSSIBLE_CASE(Word64Select)
DECLARE_IMPOSSIBLE_CASE(Float32Select)
DECLARE_IMPOSSIBLE_CASE(Float64Select)
DECLARE_IMPOSSIBLE_CASE(LoadStackCheckOffset)
DECLARE_IMPOSSIBLE_CASE(LoadFramePointer)
DECLARE_IMPOSSIBLE_CASE(LoadParentFramePointer)
DECLARE_IMPOSSIBLE_CASE(UnalignedLoad)
DECLARE_IMPOSSIBLE_CASE(UnalignedStore)
DECLARE_IMPOSSIBLE_CASE(Int32PairAdd)
DECLARE_IMPOSSIBLE_CASE(Int32PairSub)
DECLARE_IMPOSSIBLE_CASE(Int32PairMul)
DECLARE_IMPOSSIBLE_CASE(Word32PairShl)
DECLARE_IMPOSSIBLE_CASE(Word32PairShr)
DECLARE_IMPOSSIBLE_CASE(Word32PairSar)
DECLARE_IMPOSSIBLE_CASE(ProtectedLoad)
DECLARE_IMPOSSIBLE_CASE(ProtectedStore)
DECLARE_IMPOSSIBLE_CASE(MemoryBarrier)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord8ToInt32)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord16ToInt32)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord8ToInt64)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord16ToInt64)
DECLARE_IMPOSSIBLE_CASE(SignExtendWord32ToInt64)
DECLARE_IMPOSSIBLE_CASE(StackPointerGreaterThan)
DECLARE_IMPOSSIBLE_CASE(TraceInstruction)
MACHINE_OP_LIST(DECLARE_IMPOSSIBLE_CASE)
#undef DECLARE_IMPOSSIBLE_CASE
UNREACHABLE();
}
@ -361,7 +228,6 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
TYPER_SUPPORTED_MACHINE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type left, Type right) { \
@ -377,7 +243,6 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
TYPER_SUPPORTED_MACHINE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type input) { return TypeUnaryOp(input, Name); }
@ -863,9 +728,9 @@ Type Typer::Visitor::TypeOsrValue(Node* node) {
Type Typer::Visitor::TypeRetain(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeInt32Constant(Node* node) { return Type::Machine(); }
Type Typer::Visitor::TypeInt32Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeInt64Constant(Node* node) { return Type::Machine(); }
Type Typer::Visitor::TypeInt64Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeTaggedIndexConstant(Node* node) { UNREACHABLE(); }
@ -909,14 +774,6 @@ Type Typer::Visitor::TypePhi(Node* node) {
return type;
}
Type Typer::Visitor::TypeEnterMachineGraph(Node* node) {
return Type::Machine();
}
Type Typer::Visitor::TypeExitMachineGraph(Node* node) {
return ExitMachineGraphParametersOf(node->op()).output_type();
}
Type Typer::Visitor::TypeInductionVariablePhi(Node* node) {
int arity = NodeProperties::GetControlInput(node)->op()->ControlInputCount();
DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(node)->opcode());


@ -49,7 +49,6 @@ namespace compiler {
//
// Constant(x) < T iff instance_type(map(x)) < T
//
// None <= Machine <= Any
//
// RANGE TYPES
//
@ -141,8 +140,7 @@ namespace compiler {
// We split the macro list into two parts because the Torque equivalent in
// turbofan-types.tq uses two 32bit bitfield structs.
#define PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(V) \
V(SandboxedPointer, uint64_t{1} << 32) \
V(Machine, uint64_t{1} << 33)
V(SandboxedPointer, uint64_t{1} << 32)
#define PROPER_BITSET_TYPE_LIST(V) \
V(None, uint64_t{0}) \


@ -312,16 +312,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
CHECK_EQ(1, count_true);
CHECK_EQ(1, count_false);
switch (BranchParametersOf(node->op()).semantics()) {
case BranchSemantics::kJS:
case BranchSemantics::kUnspecified:
// The condition must be a Boolean.
CheckValueInputIs(node, 0, Type::Boolean());
break;
case BranchSemantics::kMachine:
CheckValueInputIs(node, 0, Type::Machine());
break;
}
// The condition must be a Boolean.
CheckValueInputIs(node, 0, Type::Boolean());
CheckNotTyped(node);
break;
}
@ -614,12 +606,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTailCall:
// TODO(bmeurer): what are the constraints on these?
break;
case IrOpcode::kEnterMachineGraph:
CheckTypeIs(node, Type::Machine());
break;
case IrOpcode::kExitMachineGraph:
CheckValueInputIs(node, 0, Type::Machine());
break;
// JavaScript operators
// --------------------


@ -41,12 +41,11 @@ ObjectAccess ObjectAccessForGCStores(wasm::ValueType type);
class WasmGraphAssembler : public GraphAssembler {
public:
WasmGraphAssembler(MachineGraph* mcgraph, Zone* zone)
: GraphAssembler(mcgraph, zone, BranchSemantics::kMachine),
simplified_(zone) {}
: GraphAssembler(mcgraph, zone), simplified_(zone) {}
template <typename... Args>
Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id,
Operator::Properties properties, Args... args) {
Operator::Properties properties, Args*... args) {
auto* call_descriptor = GetBuiltinCallDescriptor(
WasmRuntimeStubIdToBuiltinName(stub_id), temp_zone(),
StubCallMode::kCallWasmRuntimeStub, false, properties);
@ -64,7 +63,7 @@ class WasmGraphAssembler : public GraphAssembler {
template <typename... Args>
Node* CallBuiltin(Builtin name, Operator::Properties properties,
Args... args) {
Args*... args) {
auto* call_descriptor = GetBuiltinCallDescriptor(
name, temp_zone(), StubCallMode::kCallBuiltinPointer, false,
properties);
@ -269,7 +268,7 @@ class WasmGraphAssembler : public GraphAssembler {
effect(), control()));
}
SimplifiedOperatorBuilder* simplified() override { return &simplified_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
private:
SimplifiedOperatorBuilder simplified_;


@ -833,7 +833,7 @@ DEFINE_BOOL(verify_csa, DEBUG_BOOL,
// non-ENABLE_VERIFY_CSA configuration.
DEFINE_BOOL_READONLY(verify_csa, false,
"verify TurboFan machine graph of code stubs")
#endif // ENABLE_VERIFY_CSA
#endif
DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
DEFINE_STRING(csa_trap_on_node, nullptr,
"trigger break point when a node with given id is created in "
@ -934,9 +934,6 @@ DEFINE_BOOL_READONLY(turbo_rewrite_far_jumps, false,
"rewrite far to near jumps (ia32,x64)")
#endif
DEFINE_BOOL(
turbo_rab_gsab, true,
"optimize ResizableArrayBuffer / GrowableSharedArrayBuffer in TurboFan")
DEFINE_BOOL(
stress_gc_during_compilation, false,
"simulate GC/compiler thread race related to https://crbug.com/v8/8520")


@ -12,65 +12,62 @@
namespace v8 {
namespace internal {
namespace {
constexpr size_t size_to_shift(size_t size) {
switch (size) {
case 1:
int ElementsKindToShiftSize(ElementsKind elements_kind) {
switch (elements_kind) {
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case RAB_GSAB_UINT8_ELEMENTS:
case RAB_GSAB_INT8_ELEMENTS:
case RAB_GSAB_UINT8_CLAMPED_ELEMENTS:
return 0;
case 2:
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
case RAB_GSAB_UINT16_ELEMENTS:
case RAB_GSAB_INT16_ELEMENTS:
return 1;
case 4:
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
case FLOAT32_ELEMENTS:
case RAB_GSAB_UINT32_ELEMENTS:
case RAB_GSAB_INT32_ELEMENTS:
case RAB_GSAB_FLOAT32_ELEMENTS:
return 2;
case 8:
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
case BIGINT64_ELEMENTS:
case BIGUINT64_ELEMENTS:
case RAB_GSAB_FLOAT64_ELEMENTS:
case RAB_GSAB_BIGINT64_ELEMENTS:
case RAB_GSAB_BIGUINT64_ELEMENTS:
return 3;
default:
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case PACKED_NONEXTENSIBLE_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
case HOLEY_FROZEN_ELEMENTS:
case HOLEY_SEALED_ELEMENTS:
case HOLEY_NONEXTENSIBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
case SHARED_ARRAY_ELEMENTS:
return kTaggedSizeLog2;
case WASM_ARRAY_ELEMENTS:
case NO_ELEMENTS:
UNREACHABLE();
}
}
} // namespace
constexpr uint8_t kTypedArrayAndRabGsabTypedArrayElementsKindShifts[] = {
#define SHIFT(Type, type, TYPE, ctype) size_to_shift(sizeof(ctype)),
TYPED_ARRAYS(SHIFT) RAB_GSAB_TYPED_ARRAYS(SHIFT)
#undef SHIFT
};
constexpr uint8_t kTypedArrayAndRabGsabTypedArrayElementsKindSizes[] = {
#define SIZE(Type, type, TYPE, ctype) sizeof(ctype),
TYPED_ARRAYS(SIZE) RAB_GSAB_TYPED_ARRAYS(SIZE)
#undef SIZE
};
#define VERIFY_SHIFT(Type, type, TYPE, ctype) \
static_assert( \
kTypedArrayAndRabGsabTypedArrayElementsKindShifts \
[ElementsKind::TYPE##_ELEMENTS - \
ElementsKind::FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND] == \
ElementsKindToShiftSize(ElementsKind::TYPE##_ELEMENTS), \
"Shift of ElementsKind::" #TYPE \
"_ELEMENTS does not match in static table");
TYPED_ARRAYS(VERIFY_SHIFT)
RAB_GSAB_TYPED_ARRAYS(VERIFY_SHIFT)
#undef VERIFY_SHIFT
#define VERIFY_SIZE(Type, type, TYPE, ctype) \
static_assert( \
kTypedArrayAndRabGsabTypedArrayElementsKindSizes \
[ElementsKind::TYPE##_ELEMENTS - \
ElementsKind::FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND] == \
ElementsKindToByteSize(ElementsKind::TYPE##_ELEMENTS), \
"Size of ElementsKind::" #TYPE \
"_ELEMENTS does not match in static table");
TYPED_ARRAYS(VERIFY_SIZE)
RAB_GSAB_TYPED_ARRAYS(VERIFY_SIZE)
#undef VERIFY_SIZE
const uint8_t* TypedArrayAndRabGsabTypedArrayElementsKindShifts() {
return &kTypedArrayAndRabGsabTypedArrayElementsKindShifts[0];
UNREACHABLE();
}
const uint8_t* TypedArrayAndRabGsabTypedArrayElementsKindSizes() {
return &kTypedArrayAndRabGsabTypedArrayElementsKindSizes[0];
int ElementsKindToByteSize(ElementsKind elements_kind) {
return 1 << ElementsKindToShiftSize(elements_kind);
}
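// Example: FLOAT64_ELEMENTS has shift size 3, so its byte size is 1 << 3 = 8.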
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {


@ -169,64 +169,8 @@ constexpr int kFastElementsKindBits = 3;
static_assert((1 << kFastElementsKindBits) > LAST_FAST_ELEMENTS_KIND);
static_assert((1 << (kFastElementsKindBits - 1)) <= LAST_FAST_ELEMENTS_KIND);
const uint8_t* TypedArrayAndRabGsabTypedArrayElementsKindShifts();
const uint8_t* TypedArrayAndRabGsabTypedArrayElementsKindSizes();
inline constexpr int ElementsKindToShiftSize(ElementsKind elements_kind) {
switch (elements_kind) {
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case RAB_GSAB_UINT8_ELEMENTS:
case RAB_GSAB_INT8_ELEMENTS:
case RAB_GSAB_UINT8_CLAMPED_ELEMENTS:
return 0;
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
case RAB_GSAB_UINT16_ELEMENTS:
case RAB_GSAB_INT16_ELEMENTS:
return 1;
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
case FLOAT32_ELEMENTS:
case RAB_GSAB_UINT32_ELEMENTS:
case RAB_GSAB_INT32_ELEMENTS:
case RAB_GSAB_FLOAT32_ELEMENTS:
return 2;
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
case BIGINT64_ELEMENTS:
case BIGUINT64_ELEMENTS:
case RAB_GSAB_FLOAT64_ELEMENTS:
case RAB_GSAB_BIGINT64_ELEMENTS:
case RAB_GSAB_BIGUINT64_ELEMENTS:
return 3;
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case PACKED_NONEXTENSIBLE_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
case HOLEY_FROZEN_ELEMENTS:
case HOLEY_SEALED_ELEMENTS:
case HOLEY_NONEXTENSIBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
case SHARED_ARRAY_ELEMENTS:
return kTaggedSizeLog2;
case WASM_ARRAY_ELEMENTS:
case NO_ELEMENTS:
UNREACHABLE();
}
UNREACHABLE();
}
inline constexpr int ElementsKindToByteSize(ElementsKind elements_kind) {
return 1 << ElementsKindToShiftSize(elements_kind);
}
V8_EXPORT_PRIVATE int ElementsKindToShiftSize(ElementsKind elements_kind);
V8_EXPORT_PRIVATE int ElementsKindToByteSize(ElementsKind elements_kind);
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);


@ -54,7 +54,6 @@ bitfield struct TurbofanTypeLowBits extends uint32 {
bitfield struct TurbofanTypeHighBits extends uint32 {
sandboxed_pointer: bool: 1 bit;
machine: bool: 1 bit;
}
@export


@ -3,823 +3,25 @@
// found in the LICENSE file.
// Flags: --harmony-rab-gsab --allow-natives-syntax --turbofan
// Flags: --no-always-turbofan --turbo-rab-gsab
"use strict";
d8.file.execute('test/mjsunit/typedarray-helpers.js');
const is_little_endian = (() => {
var buffer = new ArrayBuffer(4);
const HEAP32 = new Int32Array(buffer);
const HEAPU8 = new Uint8Array(buffer);
HEAP32[0] = 255;
return (HEAPU8[0] === 255 && HEAPU8[3] === 0);
})();
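// Sketch of the probe above: HEAP32[0] = 255 stores the bytes [255, 0, 0, 0]
// on a little-endian machine and [0, 0, 0, 255] on a big-endian one, so
// inspecting bytes 0 and 3 distinguishes the two.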
function FillBuffer(buffer) {
const view = new Uint8Array(buffer);
for (let i = 0; i < view.length; ++i) {
view[i] = i;
}
}
%NeverOptimizeFunction(FillBuffer);
function asU16(index) {
const start = index * 2;
if (is_little_endian) {
return (start + 1) * 256 + start;
} else {
return start * 256 + start + 1;
}
}
%NeverOptimizeFunction(asU16);
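// Worked example (little-endian): asU16(1) is the expected value of element 1
// of a Uint16Array over a FillBuffer'ed buffer, i.e. bytes [2, 3] read as
// 3 * 256 + 2 = 770.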
function asU32(index) {
const start = index * 4;
if (is_little_endian) {
return (((start + 3) * 256 + start + 2) * 256 + start + 1) * 256 + start;
} else {
return ((((start * 256) + start + 1) * 256) + start + 2) * 256 + start + 3;
}
}
%NeverOptimizeFunction(asU32);
function asF32(index) {
const start = index * 4;
const ab = new ArrayBuffer(4);
const ta = new Uint8Array(ab);
for (let i = 0; i < 4; ++i) ta[i] = start + i;
return new Float32Array(ab)[0];
}
%NeverOptimizeFunction(asF32);
function asF64(index) {
const start = index * 8;
const ab = new ArrayBuffer(8);
const ta = new Uint8Array(ab);
for (let i = 0; i < 8; ++i) ta[i] = start + i;
return new Float64Array(ab)[0];
}
%NeverOptimizeFunction(asF64);
function asB64(index) {
const start = index * 8;
let result = 0n;
if (is_little_endian) {
for (let i = 0; i < 8; ++i) {
result = result << 8n;
result += BigInt(start + 7 - i);
}
} else {
for (let i = 0; i < 8; ++i) {
result = result << 8n;
result += BigInt(start + i);
}
}
return result;
}
%NeverOptimizeFunction(asB64);
function CreateBuffer(shared, len, max_len) {
return shared ? new SharedArrayBuffer(len, {maxByteLength: max_len}) :
new ArrayBuffer(len, {maxByteLength: max_len});
}
%NeverOptimizeFunction(CreateBuffer);
function MakeResize(target, shared, offset, fixed_len) {
const bpe = target.name === 'DataView' ? 1 : target.BYTES_PER_ELEMENT;
function RoundDownToElementSize(blen) {
return Math.floor(blen / bpe) * bpe;
}
if (!shared) {
if (fixed_len === undefined) {
return (b, len) => {
b.resize(len);
const blen = Math.max(0, len - offset);
return RoundDownToElementSize(blen);
};
} else {
const fixed_blen = fixed_len * bpe;
return (b, len) => {
b.resize(len);
const blen = fixed_blen <= (len - offset) ? fixed_blen : 0;
return RoundDownToElementSize(blen);
}
}
} else {
if (fixed_len === undefined) {
return (b, len) => {
let blen = 0;
if (len > b.byteLength) {
b.grow(len);
blen = Math.max(0, len - offset);
} else {
blen = b.byteLength - offset;
}
return RoundDownToElementSize(blen);
};
} else {
return (b, len) => {
if (len > b.byteLength) {
b.grow(len);
}
return fixed_len * bpe;
};
}
}
}
%NeverOptimizeFunction(MakeResize);
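// Usage sketch (hypothetical values): for a non-shared, length-tracking
// Uint32Array at byte offset 8, MakeResize(Uint32Array, false, 8, undefined)
// yields a Resize where Resize(rab, 22) resizes rab to 22 bytes and returns
// the view's usable byte length, floor((22 - 8) / 4) * 4 = 12.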
function MakeElement(target, offset) {
const o = offset / target.BYTES_PER_ELEMENT;
if (target.name === 'Int8Array') {
return (index) => {
return o + index;
};
} else if (target.name === 'Uint32Array') {
return (index) => {
return asU32(o + index);
};
} else if (target.name === 'Float64Array') {
return (index) => {
return asF64(o + index);
};
} else if (target.name === 'BigInt64Array') {
return (index) => {
return asB64(o + index);
};
} else {
console.log(`unimplemented: MakeElement(${target.name})`);
return () => undefined;
}
}
%NeverOptimizeFunction(MakeElement);
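// Example: MakeElement(Uint32Array, 8) returns (index) => asU32(2 + index),
// the expected value of element `index` of a Uint32Array viewing the filled
// buffer from byte offset 8 onwards.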
function MakeCheckBuffer(target, offset) {
return (ab, up_to) => {
const view = new Uint8Array(ab);
for (let i = 0; i < offset; ++i) {
assertEquals(0, view[i]);
}
for (let i = 0; i < (up_to * target.BYTES_PER_ELEMENT) + 1; ++i) {
// Use PrintBuffer(ab) for debugging.
assertEquals(offset + i, view[offset + i]);
}
}
}
%NeverOptimizeFunction(MakeCheckBuffer);
function ClearBuffer(ab) {
for (let i = 0; i < ab.byteLength; ++i) ab[i] = 0;
}
%NeverOptimizeFunction(ClearBuffer);
// Use this for debugging these tests.
function PrintBuffer(buffer) {
const view = new Uint8Array(buffer);
for (let i = 0; i < 32; ++i) {
console.log(`[${i}]: ${view[i]}`)
}
}
%NeverOptimizeFunction(PrintBuffer);
(function() {
for (let shared of [false, true]) {
for (let length_tracking of [false, true]) {
for (let with_offset of [false, true]) {
for (let target
of [Int8Array, Uint32Array, Float64Array, BigInt64Array]) {
const test_case = `Testing: Length_${shared ? 'GSAB' : 'RAB'}_${
length_tracking ? 'LengthTracking' : 'FixedLength'}${
with_offset ? 'WithOffset' : ''}_${target.name}`;
// console.log(test_case);
const byte_length_code = 'return ta.byteLength; // ' + test_case;
const ByteLength = new Function('ta', byte_length_code);
const length_code = 'return ta.length; // ' + test_case;
const Length = new Function('ta', length_code);
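// Each configuration gets its own Function instance (with a unique source
// comment), so type feedback and optimization state are not shared across
// targets and buffer kinds.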
const offset = with_offset ? 8 : 0;
let blen = 16 - offset;
const fixed_len =
length_tracking ? undefined : (blen / target.BYTES_PER_ELEMENT);
const ab = CreateBuffer(shared, 16, 40);
const ta = new target(ab, offset, fixed_len);
const Resize = MakeResize(target, shared, offset, fixed_len);
assertUnoptimized(ByteLength);
assertUnoptimized(Length);
%PrepareFunctionForOptimization(ByteLength);
%PrepareFunctionForOptimization(Length);
assertEquals(blen, ByteLength(ta));
assertEquals(blen, ByteLength(ta));
assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
%OptimizeFunctionOnNextCall(ByteLength);
%OptimizeFunctionOnNextCall(Length);
assertEquals(blen, ByteLength(ta));
assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
blen = Resize(ab, 32);
assertEquals(blen, ByteLength(ta));
assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
blen = Resize(ab, 9);
assertEquals(blen, ByteLength(ta));
assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
assertOptimized(ByteLength);
assertOptimized(Length);
blen = Resize(ab, 24);
assertEquals(blen, ByteLength(ta));
assertEquals(Math.floor(blen / target.BYTES_PER_ELEMENT), Length(ta));
assertOptimized(ByteLength);
assertOptimized(Length);
if (!shared) {
%ArrayBufferDetach(ab);
assertEquals(0, ByteLength(ta));
assertEquals(0, Length(ta));
assertOptimized(Length);
}
}
}
}
}
})();
(function() {
for (let shared of [false, true]) {
for (let length_tracking of [false, true]) {
for (let with_offset of [false, true]) {
for (let target
of [Int8Array, Uint32Array, Float64Array, BigInt64Array]) {
const test_case = `Testing: Read_${shared ? 'GSAB' : 'RAB'}_${
length_tracking ? 'LengthTracking' : 'FixedLength'}${
with_offset ? 'WithOffset' : ''}_${target.name}`;
// console.log(test_case);
const read_code = 'return ta[index]; // ' + test_case;
const Read = new Function('ta', 'index', read_code);
const offset = with_offset ? 8 : 0;
let blen = 16 - offset;
let len = Math.floor(blen / target.BYTES_PER_ELEMENT);
const fixed_len = length_tracking ? undefined : len;
const ab = CreateBuffer(shared, 16, 40);
const ta = new target(ab, offset, fixed_len);
const Resize = MakeResize(target, shared, offset, fixed_len);
const Element = MakeElement(target, offset);
FillBuffer(ab);
assertUnoptimized(Read);
%PrepareFunctionForOptimization(Read);
for (let i = 0; i < len * 2; ++i)
assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
%OptimizeFunctionOnNextCall(Read);
for (let i = 0; i < len * 2; ++i)
assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
assertOptimized(Read);
blen = Resize(ab, 32);
FillBuffer(ab);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len * 2; ++i)
assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
assertOptimized(Read);
blen = Resize(ab, 9);
FillBuffer(ab);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len * 2; ++i)
assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
assertOptimized(Read);
blen = Resize(ab, 0);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len * 2; ++i)
assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
assertOptimized(Read);
blen = Resize(ab, 24);
FillBuffer(ab);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len * 2; ++i)
assertEquals(i < len ? Element(i) : undefined, Read(ta, i));
assertOptimized(Read);
if (!shared) {
%ArrayBufferDetach(ab);
assertEquals(undefined, Read(ta, 0));
// assertOptimized(Read);
}
}
}
}
}
})();
(function() {
for (let shared of [false, true]) {
for (let length_tracking of [false, true]) {
for (let with_offset of [false, true]) {
for (let target
of [Int8Array, Uint32Array, Float64Array, BigInt64Array]) {
const test_case = `Testing: Write_${shared ? 'GSAB' : 'RAB'}_${
length_tracking ? 'LengthTracking' : 'FixedLength'}${
with_offset ? 'WithOffset' : ''}_${target.name}`;
// console.log(test_case);
const write_code = 'ta[index] = value; // ' + test_case;
const Write = new Function('ta', 'index', 'value', write_code);
const offset = with_offset ? 8 : 0;
let blen = 16 - offset;
let len = Math.floor(blen / target.BYTES_PER_ELEMENT);
const fixed_len = length_tracking ? undefined : len;
const ab = CreateBuffer(shared, 16, 40);
const ta = new target(ab, offset, fixed_len);
const Resize = MakeResize(target, shared, offset, fixed_len);
const Element = MakeElement(target, offset);
const CheckBuffer = MakeCheckBuffer(target, offset);
ClearBuffer(ab);
assertUnoptimized(Write);
%PrepareFunctionForOptimization(Write);
for (let i = 0; i < len; ++i) {
Write(ta, i, Element(i));
CheckBuffer(ab, i);
}
ClearBuffer(ab);
%OptimizeFunctionOnNextCall(Write);
for (let i = 0; i < len; ++i) {
Write(ta, i, Element(i));
CheckBuffer(ab, i);
}
assertOptimized(Write);
blen = Resize(ab, 32);
ClearBuffer(ab);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len; ++i) {
Write(ta, i, Element(i));
CheckBuffer(ab, i);
}
assertOptimized(Write);
blen = Resize(ab, 9);
ClearBuffer(ab);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len; ++i) {
Write(ta, i, Element(i));
CheckBuffer(ab, i);
}
assertOptimized(Write);
blen = Resize(ab, 24);
ClearBuffer(ab);
len = Math.floor(blen / target.BYTES_PER_ELEMENT);
for (let i = 0; i < len; ++i) {
Write(ta, i, Element(i));
CheckBuffer(ab, i);
}
assertOptimized(Write);
}
}
}
}
})();
(function() {
for (let shared of [false, true]) {
for (let length_tracking of [false, true]) {
for (let with_offset of [false, true]) {
const test_case = `Testing: ByteLength_${shared ? 'GSAB' : 'RAB'}_${
length_tracking ?
'LengthTracking' :
'FixedLength'}${with_offset ? 'WithOffset' : ''}_DataView`;
// console.log(test_case);
const byte_length_code = 'return dv.byteLength; // ' + test_case;
const ByteLength = new Function('dv', byte_length_code);
const offset = with_offset ? 8 : 0;
let blen = 16 - offset;
const fixed_blen = length_tracking ? undefined : blen;
const ab = CreateBuffer(shared, 16, 40);
const dv = new DataView(ab, offset, fixed_blen);
const Resize = MakeResize(DataView, shared, offset, fixed_blen);
assertUnoptimized(ByteLength);
%PrepareFunctionForOptimization(ByteLength);
assertEquals(blen, ByteLength(dv));
assertEquals(blen, ByteLength(dv));
%OptimizeFunctionOnNextCall(ByteLength);
assertEquals(blen, ByteLength(dv));
assertOptimized(ByteLength);
blen = Resize(ab, 32);
assertEquals(blen, ByteLength(dv));
assertOptimized(ByteLength);
blen = Resize(ab, 9);
if (length_tracking || shared) {
assertEquals(blen, ByteLength(dv));
} else {
// For fixed-length DataViews over a RAB, Resize(ab, 9) shrinks the buffer
// below the view's extent, leaving the view out of bounds (treated like
// detached), so DataView.prototype.byteLength has to throw.
assertThrows(() => { ByteLength(dv); }, TypeError);
}
assertOptimized(ByteLength);
blen = Resize(ab, 24);
assertEquals(blen, ByteLength(dv));
assertOptimized(ByteLength);
if (!shared) {
%ArrayBufferDetach(ab);
assertThrows(() => { ByteLength(dv); }, TypeError);
assertOptimized(ByteLength);
}
}
}
}
})();
(function() {
function ByteLength_RAB_LengthTrackingWithOffset_DataView(dv) {
return dv.byteLength;
}
const ByteLength = ByteLength_RAB_LengthTrackingWithOffset_DataView;
const rab = CreateResizableArrayBuffer(16, 40);
const dv = new DataView(rab, 7);
%PrepareFunctionForOptimization(ByteLength);
assertEquals(9, ByteLength(dv));
assertEquals(9, ByteLength(dv));
%OptimizeFunctionOnNextCall(ByteLength);
assertEquals(9, ByteLength(dv));
assertOptimized(ByteLength);
})();
(function() {
function Read_TA_RAB_LengthTracking_Mixed(ta, index) {
return ta[index];
}
const Get = Read_TA_RAB_LengthTracking_Mixed;
const ab = new ArrayBuffer(16);
FillBuffer(ab);
const rab = CreateResizableArrayBuffer(16, 40);
FillBuffer(rab);
let ta_int8 = new Int8Array(ab);
let ta_uint16 = new Uint16Array(rab);
let ta_float32 = new Float32Array(ab);
let ta_float64 = new Float64Array(rab);
// Train with feedback for all elements kinds.
%PrepareFunctionForOptimization(Get);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(asU16(7), Get(ta_uint16, 7));
assertEquals(undefined, Get(ta_uint16, 8));
assertEquals(undefined, Get(ta_uint16, 12));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(asF64(1), Get(ta_float64, 1));
assertEquals(undefined, Get(ta_float64, 2));
assertEquals(undefined, Get(ta_float64, 12));
%OptimizeFunctionOnNextCall(Get);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(asU16(7), Get(ta_uint16, 7));
assertEquals(undefined, Get(ta_uint16, 8));
assertEquals(undefined, Get(ta_uint16, 12));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(asF64(1), Get(ta_float64, 1));
assertEquals(undefined, Get(ta_float64, 2));
assertEquals(undefined, Get(ta_float64, 12));
assertOptimized(Get);
rab.resize(32);
FillBuffer(rab);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(asU16(15), Get(ta_uint16, 15));
assertEquals(undefined, Get(ta_uint16, 16));
assertEquals(undefined, Get(ta_uint16, 40));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(asF64(1), Get(ta_float64, 1));
assertEquals(asF64(3), Get(ta_float64, 3));
assertEquals(undefined, Get(ta_float64, 4));
assertEquals(undefined, Get(ta_float64, 12));
assertOptimized(Get);
rab.resize(9);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(undefined, Get(ta_uint16, 4));
assertEquals(undefined, Get(ta_uint16, 12));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(undefined, Get(ta_float64, 1));
assertEquals(undefined, Get(ta_float64, 12));
assertOptimized(Get);
// Call with a different map to trigger deoptimization. We use this
// to verify that we have actually specialized on the above maps only.
let ta_uint8 = new Uint8Array(rab);
assertEquals(7, Get(ta_uint8, 7));
assertUnoptimized(Get);
}());
(function() {
function Read_TA_RAB_LengthTracking_Mixed(ta, index) {
return ta[index];
}
const Get = Read_TA_RAB_LengthTracking_Mixed;
const ab = new ArrayBuffer(16);
FillBuffer(ab);
const rab = CreateResizableArrayBuffer(16, 40);
FillBuffer(rab);
let ta_int8 = new Int8Array(ab);
let ta_uint16 = new Uint16Array(rab);
let ta_float32 = new Float32Array(ab);
let ta_float64 = new Float64Array(rab);
// Train with feedback for all elements kinds.
%PrepareFunctionForOptimization(Get);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(asU16(7), Get(ta_uint16, 7));
assertEquals(undefined, Get(ta_uint16, 8));
assertEquals(undefined, Get(ta_uint16, 12));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(asF64(1), Get(ta_float64, 1));
assertEquals(undefined, Get(ta_float64, 2));
assertEquals(undefined, Get(ta_float64, 12));
%OptimizeFunctionOnNextCall(Get);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(asU16(7), Get(ta_uint16, 7));
assertEquals(undefined, Get(ta_uint16, 8));
assertEquals(undefined, Get(ta_uint16, 12));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(asF64(1), Get(ta_float64, 1));
assertEquals(undefined, Get(ta_float64, 2));
assertEquals(undefined, Get(ta_float64, 12));
assertOptimized(Get);
rab.resize(32);
FillBuffer(rab);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(asU16(15), Get(ta_uint16, 15));
assertEquals(undefined, Get(ta_uint16, 16));
assertEquals(undefined, Get(ta_uint16, 40));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(asF64(1), Get(ta_float64, 1));
assertEquals(asF64(3), Get(ta_float64, 3));
assertEquals(undefined, Get(ta_float64, 4));
assertEquals(undefined, Get(ta_float64, 12));
assertOptimized(Get);
rab.resize(9);
assertEquals(0, Get(ta_int8, 0));
assertEquals(3, Get(ta_int8, 3));
assertEquals(15, Get(ta_int8, 15));
assertEquals(undefined, Get(ta_int8, 16));
assertEquals(undefined, Get(ta_int8, 32));
assertEquals(asU16(0), Get(ta_uint16, 0));
assertEquals(asU16(3), Get(ta_uint16, 3));
assertEquals(undefined, Get(ta_uint16, 4));
assertEquals(undefined, Get(ta_uint16, 12));
assertEquals(asF32(0), Get(ta_float32, 0));
assertEquals(asF32(3), Get(ta_float32, 3));
assertEquals(undefined, Get(ta_float32, 4));
assertEquals(undefined, Get(ta_float32, 12));
assertEquals(asF64(0), Get(ta_float64, 0));
assertEquals(undefined, Get(ta_float64, 1));
assertEquals(undefined, Get(ta_float64, 12));
assertOptimized(Get);
// Call with a different map to trigger deoptimization. We use this
// to verify that we have actually specialized on the above maps only.
let ta_uint8 = new Uint8Array(rab);
assertEquals(7, Get(ta_uint8, 7));
assertUnoptimized(Get);
}());
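// For a length-tracking TypedArray, ta.length is derived from the current
// buffer size, in effect
//   floor((buffer.byteLength - ta.byteOffset) / ctor.BYTES_PER_ELEMENT);
// e.g. the 16-byte rab below yields a Uint16Array length of 8 and a
// BigInt64Array length of 2.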
(function() {
function Length_TA_RAB_LengthTracking_Mixed(ta) {
return ta.length;
}
let Length = Length_TA_RAB_LengthTracking_Mixed;
const ab = new ArrayBuffer(32);
const rab = CreateResizableArrayBuffer(16, 40);
let ta_int8 = new Int8Array(ab);
let ta_uint16 = new Uint16Array(rab);
let ta_float32 = new Float32Array(ab);
let ta_bigint64 = new BigInt64Array(rab);
// Train with feedback for all elements kinds.
%PrepareFunctionForOptimization(Length);
assertEquals(32, Length(ta_int8));
assertEquals(8, Length(ta_uint16));
assertEquals(8, Length(ta_float32));
assertEquals(2, Length(ta_bigint64));
%OptimizeFunctionOnNextCall(Length);
assertEquals(32, Length(ta_int8));
assertEquals(8, Length(ta_uint16));
assertEquals(8, Length(ta_float32));
assertEquals(2, Length(ta_bigint64));
assertOptimized(Length);
}());
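// The test below exercises a full deopt/reopt cycle: Length is first
// optimized for the Int8Array and Float64Array maps only, deopts when a
// Uint32Array flows in, and is then recompiled with feedback covering all
// four maps.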
(function() {
function Length_RAB_GSAB_LengthTrackingWithOffset_Mixed(ta) {
return ta.length;
}
const Length = Length_RAB_GSAB_LengthTrackingWithOffset_Mixed;
const rab = CreateResizableArrayBuffer(16, 40);
let ta_int8 = new Int8Array(rab);
let ta_float64 = new Float64Array(rab);
// Train with feedback for Int8Array and Float64Array.
%PrepareFunctionForOptimization(Length);
assertEquals(16, Length(ta_int8));
assertEquals(2, Length(ta_float64));
%OptimizeFunctionOnNextCall(Length);
assertEquals(16, Length(ta_int8));
assertEquals(2, Length(ta_float64));
assertOptimized(Length);
let ta_uint32 = new Uint32Array(rab);
let ta_bigint64 = new BigInt64Array(rab);
// Calling with Uint32Array will deopt because of the map check on length.
assertEquals(4, Length(ta_uint32));
assertUnoptimized(Length);
%PrepareFunctionForOptimization(Length);
assertEquals(2, Length(ta_bigint64));
// Recompile with additional feedback for Uint32Array and BigInt64Array.
%OptimizeFunctionOnNextCall(Length);
assertEquals(2, Length(ta_bigint64));
assertOptimized(Length);
// Length handles all four TypedArrays without deopting.
assertEquals(16, Length(ta_int8));
assertEquals(2, Length(ta_float64));
assertEquals(4, Length(ta_uint32));
assertEquals(2, Length(ta_bigint64));
assertOptimized(Length);
// Length handles corresponding gsab-backed TypedArrays without deopting.
const gsab = CreateGrowableSharedArrayBuffer(16, 40);
let ta2_uint32 = new Uint32Array(gsab, 8);
let ta2_float64 = new Float64Array(gsab, 8);
let ta2_bigint64 = new BigInt64Array(gsab, 8);
let ta2_int8 = new Int8Array(gsab, 8);
assertEquals(8, Length(ta2_int8));
assertEquals(1, Length(ta2_float64));
assertEquals(2, Length(ta2_uint32));
assertEquals(1, Length(ta2_bigint64));
assertOptimized(Length);
// Test Length after rab has been resized to a smaller size.
rab.resize(5);
assertEquals(5, Length(ta_int8));
assertEquals(0, Length(ta_float64));
assertEquals(1, Length(ta_uint32));
assertEquals(0, Length(ta_bigint64));
assertOptimized(Length);
// Test Length after rab has been resized to a larger size.
rab.resize(40);
assertEquals(40, Length(ta_int8));
assertEquals(5, Length(ta_float64));
assertEquals(10, Length(ta_uint32));
assertEquals(5, Length(ta_bigint64));
assertOptimized(Length);
// Test Length after gsab has been grown to a larger size.
gsab.grow(25);
assertEquals(17, Length(ta2_int8));
assertEquals(2, Length(ta2_float64));
assertEquals(4, Length(ta2_uint32));
assertEquals(2, Length(ta2_bigint64));
assertOptimized(Length);
})();
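// Length accesses also have to cope with feedback that mixes fixed-length
// views (constructed with an explicit length) and length-tracking ones:
// below, ta_ab_int32 keeps length 3 across resizes while the rab/gsab-backed
// length-tracking arrays follow their buffers.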
(function() {
function Length_AB_RAB_GSAB_LengthTrackingWithOffset_Mixed(ta) {
return ta.length;
}
const Length = Length_AB_RAB_GSAB_LengthTrackingWithOffset_Mixed;
let ab = new ArrayBuffer(32);
let rab = CreateResizableArrayBuffer(16, 40);
let gsab = CreateGrowableSharedArrayBuffer(16, 40);
let ta_ab_int32 = new Int32Array(ab, 8, 3);
let ta_rab_int32 = new Int32Array(rab, 4);
let ta_gsab_float64 = new Float64Array(gsab);
let ta_gsab_bigint64 = new BigInt64Array(gsab, 0, 2);
// Optimize Length with polymorphic feedback.
%PrepareFunctionForOptimization(Length);
assertEquals(3, Length(ta_ab_int32));
assertEquals(3, Length(ta_rab_int32));
assertEquals(2, Length(ta_gsab_float64));
assertEquals(2, Length(ta_gsab_bigint64));
%OptimizeFunctionOnNextCall(Length);
assertEquals(3, Length(ta_ab_int32));
assertEquals(3, Length(ta_rab_int32));
assertEquals(2, Length(ta_gsab_float64));
assertEquals(2, Length(ta_gsab_bigint64));
assertOptimized(Length);
// Test resizing and growing the underlying rab/gsab buffers.
rab.resize(8);
gsab.grow(36);
assertEquals(3, Length(ta_ab_int32));
assertEquals(1, Length(ta_rab_int32));
assertEquals(4, Length(ta_gsab_float64));
assertEquals(2, Length(ta_gsab_bigint64));
assertOptimized(Length);
// Construct additional TypedArrays with the same ElementsKind.
let ta2_ab_bigint64 = new BigInt64Array(ab, 0, 1);
let ta2_gsab_int32 = new Int32Array(gsab, 16);
let ta2_rab_float64 = new Float64Array(rab, 8);
let ta2_rab_int32 = new Int32Array(rab, 0, 1);
assertEquals(1, Length(ta2_ab_bigint64));
assertEquals(5, Length(ta2_gsab_int32));
assertEquals(0, Length(ta2_rab_float64));
assertEquals(1, Length(ta2_rab_int32));
assertOptimized(Length);
})();
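// Unlike length, ta.byteOffset is fixed at construction (here 4) and, as
// long as the view stays in bounds, is unaffected by resizing the rab.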
(function() {
function ByteOffset(ta) {
return ta.byteOffset;
}
const rab = CreateResizableArrayBuffer(16, 40);
const ta = new Int32Array(rab, 4);
%PrepareFunctionForOptimization(ByteOffset);
assertEquals(4, ByteOffset(ta));
assertEquals(4, ByteOffset(ta));
%OptimizeFunctionOnNextCall(ByteOffset);
assertEquals(4, ByteOffset(ta));
assertOptimized(ByteOffset);
})();
(function TypedArrayLength() {
for (let ctor of ctors) {
// We construct a new function from source for each case so that each
// iteration gets its own feedback; otherwise the compiled function would
// be reused with spoiled feedback from a previous constructor.
const test = new Function('\
const rab = CreateResizableArrayBuffer(16, 40); \
const ta = new ' + ctor.name + '(rab); \
rab.resize(32); \
return ta.length;');
%PrepareFunctionForOptimization(test);
assertEquals(32 / ctor.BYTES_PER_ELEMENT, test(ctor));
assertEquals(32 / ctor.BYTES_PER_ELEMENT, test(ctor));
%OptimizeFunctionOnNextCall(test);
assertEquals(32 / ctor.BYTES_PER_ELEMENT, test(ctor));
}
})();

View File

@ -1,51 +0,0 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-rab-gsab
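// Shrinking a resizable ArrayBuffer below a DataView's window of
// [byteOffset, byteOffset + byteLength) makes every subsequent get/set on
// that view throw a TypeError; both tests below rely on this.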
(function Test_OOB() {
function f() {
try {
const buffer = new ArrayBuffer(42, {'maxByteLength': 42});
const view = new DataView(buffer, 0, 42);
// Resize the buffer to smaller than the view.
buffer.resize(20);
// Any access in the view should throw.
view.setInt8(11, 0xab);
return 'did not prevent out-of-bounds access';
} catch (e) {
return 'ok';
}
}
%PrepareFunctionForOptimization(f);
assertEquals('ok', f());
assertEquals('ok', f());
%OptimizeFunctionOnNextCall(f);
assertEquals('ok', f());
assertEquals('ok', f());
}());
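// Same scenario as above, but with a non-zero view offset: after
// buffer.resize(40), the view's window [30, 42) no longer fits in the
// buffer.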
(function Test_OOB_WithOffset() {
function f() {
try {
const buffer = new ArrayBuffer(42, {'maxByteLength': 42});
const view = new DataView(buffer, 30, 12);
// Resize the buffer to smaller than the view.
buffer.resize(40);
// Any access in the view should throw.
view.setInt8(11, 0xab);
return 'did not prevent out-of-bounds access';
} catch (e) {
return 'ok';
}
}
%PrepareFunctionForOptimization(f);
assertEquals('ok', f());
assertEquals('ok', f());
%OptimizeFunctionOnNextCall(f);
assertEquals('ok', f());
assertEquals('ok', f());
}());