[turbofan] staging new implementation of escape analysis
Bug:
Change-Id: Idebe4fa6d651a404a0dc1947ed4a34a8dc9707a9
Reviewed-on: https://chromium-review.googlesource.com/565720
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46966}

parent 7c8a75e508
commit d230b44f0c

BUILD.gn (5 changed lines)
BUILD.gn
@@ -1381,6 +1381,10 @@ v8_source_set("v8_base") {
     "src/compiler/memory-optimizer.h",
     "src/compiler/move-optimizer.cc",
     "src/compiler/move-optimizer.h",
+    "src/compiler/new-escape-analysis-reducer.cc",
+    "src/compiler/new-escape-analysis-reducer.h",
+    "src/compiler/new-escape-analysis.cc",
+    "src/compiler/new-escape-analysis.h",
     "src/compiler/node-aux-data.h",
     "src/compiler/node-cache.cc",
     "src/compiler/node-cache.h",
@@ -1402,6 +1406,7 @@ v8_source_set("v8_base") {
     "src/compiler/operator.h",
     "src/compiler/osr.cc",
     "src/compiler/osr.h",
+    "src/compiler/persistent-map.h",
     "src/compiler/pipeline-statistics.cc",
     "src/compiler/pipeline-statistics.h",
     "src/compiler/pipeline.cc",
src/base/functional.h
@@ -172,7 +172,6 @@ struct hash<T*> : public std::unary_function<T*, size_t> {
   }
 };

-
 // base::bit_equal_to is a function object class for bitwise equality
 // comparison, similar to std::equal_to, except that the comparison is performed
 // on the bit representation of the operands.
src/compiler/common-operator.cc
@@ -142,6 +142,22 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
   return os;
 }

+std::ostream& operator<<(std::ostream& os, ObjectStateInfo const& i) {
+  return os << "id:" << i.object_id() << "|size:" << i.size();
+}
+
+size_t hash_value(ObjectStateInfo const& p) {
+  return base::hash_combine(p.object_id(), p.size());
+}
+
+std::ostream& operator<<(std::ostream& os, TypedObjectStateInfo const& i) {
+  return os << "id:" << i.object_id() << "|" << i.machine_types();
+}
+
+size_t hash_value(TypedObjectStateInfo const& p) {
+  return base::hash_combine(p.object_id(), p.machine_types());
+}
+
 bool operator==(RelocatablePtrConstantInfo const& lhs,
                 RelocatablePtrConstantInfo const& rhs) {
   return lhs.rmode() == rhs.rmode() && lhs.value() == rhs.value() &&
@@ -322,7 +338,7 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
   if (op->opcode() == IrOpcode::kTypedStateValues) {
     return OpParameter<TypedStateValueInfo>(op).machine_types();
   }
-  return OpParameter<const ZoneVector<MachineType>*>(op);
+  return OpParameter<TypedObjectStateInfo>(op).machine_types();
 }

 #define CACHED_OP_LIST(V) \
@@ -1076,6 +1092,14 @@ const Operator* CommonOperatorBuilder::RelocatableInt64Constant(
       RelocatablePtrConstantInfo(value, rmode));  // parameter
 }

+const Operator* CommonOperatorBuilder::ObjectId(uint32_t object_id) {
+  return new (zone()) Operator1<uint32_t>(   // --
+      IrOpcode::kObjectId, Operator::kPure,  // opcode
+      "ObjectId",                            // name
+      0, 0, 0, 1, 0, 0,                      // counts
+      object_id);                            // parameter
+}
+
 const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
                                               BranchHint hint) {
   return new (zone()) Operator1<SelectParameters>(  // --
@@ -1220,21 +1244,35 @@ bool IsRestOf(Operator const* op) {
   return OpParameter<bool>(op);
 }

-const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
-  return new (zone()) Operator1<int>(           // --
-      IrOpcode::kObjectState, Operator::kPure,  // opcode
-      "ObjectState",                            // name
-      pointer_slots, 0, 0, 1, 0, 0,             // counts
-      pointer_slots);                           // parameter
+const Operator* CommonOperatorBuilder::ObjectState(int object_id,
+                                                   int pointer_slots) {
+  return new (zone()) Operator1<ObjectStateInfo>(  // --
+      IrOpcode::kObjectState, Operator::kPure,     // opcode
+      "ObjectState",                               // name
+      pointer_slots, 0, 0, 1, 0, 0,                // counts
+      ObjectStateInfo{object_id, pointer_slots});  // parameter
 }

 const Operator* CommonOperatorBuilder::TypedObjectState(
-    const ZoneVector<MachineType>* types) {
-  return new (zone()) Operator1<const ZoneVector<MachineType>*>(  // --
-      IrOpcode::kTypedObjectState, Operator::kPure,               // opcode
-      "TypedObjectState",                                         // name
-      static_cast<int>(types->size()), 0, 0, 1, 0, 0,             // counts
-      types);                                                     // parameter
+    int object_id, const ZoneVector<MachineType>* types) {
+  return new (zone()) Operator1<TypedObjectStateInfo>(  // --
+      IrOpcode::kTypedObjectState, Operator::kPure,     // opcode
+      "TypedObjectState",                               // name
+      static_cast<int>(types->size()), 0, 0, 1, 0, 0,   // counts
+      TypedObjectStateInfo(object_id, types));          // parameter
 }

+uint32_t ObjectIdOf(Operator const* op) {
+  switch (op->opcode()) {
+    case IrOpcode::kObjectState:
+      return OpParameter<ObjectStateInfo>(op).object_id();
+    case IrOpcode::kTypedObjectState:
+      return OpParameter<TypedObjectStateInfo>(op).object_id();
+    case IrOpcode::kObjectId:
+      return OpParameter<uint32_t>(op);
+    default:
+      UNREACHABLE();
+  }
+}
+
 const Operator* CommonOperatorBuilder::FrameState(
src/compiler/common-operator.h
@@ -123,6 +123,23 @@ std::ostream& operator<<(std::ostream&, ParameterInfo const&);
 V8_EXPORT_PRIVATE int ParameterIndexOf(const Operator* const);
 const ParameterInfo& ParameterInfoOf(const Operator* const);

+struct ObjectStateInfo final : std::pair<uint32_t, int> {
+  using std::pair<uint32_t, int>::pair;
+  uint32_t object_id() const { return first; }
+  int size() const { return second; }
+};
+std::ostream& operator<<(std::ostream&, ObjectStateInfo const&);
+size_t hash_value(ObjectStateInfo const& p);
+
+struct TypedObjectStateInfo final
+    : std::pair<uint32_t, const ZoneVector<MachineType>*> {
+  using std::pair<uint32_t, const ZoneVector<MachineType>*>::pair;
+  uint32_t object_id() const { return first; }
+  const ZoneVector<MachineType>* machine_types() const { return second; }
+};
+std::ostream& operator<<(std::ostream&, TypedObjectStateInfo const&);
+size_t hash_value(TypedObjectStateInfo const& p);
+
 class RelocatablePtrConstantInfo final {
  public:
   enum Type { kInt32, kInt64 };
@@ -296,6 +313,8 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
 // IsRestOf(op) is true in the second case.
 bool IsRestOf(Operator const*);

+uint32_t ObjectIdOf(Operator const*);
+
 // Interface for building common operators that can be used at any level of IR,
 // including JavaScript, mid-level, and low-level.
 class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -340,6 +359,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
   const Operator* NumberConstant(volatile double);
   const Operator* PointerConstant(intptr_t);
   const Operator* HeapConstant(const Handle<HeapObject>&);
+  const Operator* ObjectId(uint32_t);

   const Operator* RelocatableInt32Constant(int32_t value,
                                            RelocInfo::Mode rmode);
@@ -362,8 +382,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
                                   SparseInputMask bitmask);
   const Operator* ArgumentsElementsState(bool is_rest);
   const Operator* ArgumentsLengthState(bool is_rest);
-  const Operator* ObjectState(int pointer_slots);
-  const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
+  const Operator* ObjectState(int object_id, int pointer_slots);
+  const Operator* TypedObjectState(int object_id,
+                                   const ZoneVector<MachineType>* types);
   const Operator* FrameState(BailoutId bailout_id,
                              OutputFrameStateCombine state_combine,
                              const FrameStateFunctionInfo* function_info);
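Aside, not part of the diff: ObjectStateInfo and TypedObjectStateInfo above derive from std::pair so the operator parameter gets equality and aggregate construction for free; only printing and hashing are written by hand. A self-contained sketch of the same pattern (hash_combine here is a simplified stand-in for V8's base::hash_combine):

#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>

struct ObjectStateInfo : std::pair<uint32_t, int> {
  using std::pair<uint32_t, int>::pair;  // inherit the pair constructors
  uint32_t object_id() const { return first; }
  int size() const { return second; }
};

// Simplified combiner in the spirit of base::hash_combine.
inline size_t hash_combine(size_t a, size_t b) {
  return a ^ (b + 0x9e3779b97f4a7c15ull + (a << 6) + (a >> 2));
}

inline size_t hash_value(ObjectStateInfo const& p) {
  return hash_combine(std::hash<uint32_t>()(p.object_id()),
                      std::hash<int>()(p.size()));
}

int main() {
  ObjectStateInfo info(7, 16);  // object id 7, 16 bytes of payload
  std::cout << "id:" << info.object_id() << "|size:" << info.size()
            << " hash:" << hash_value(info) << "\n";
  // operator== comes from std::pair, so two operators carrying the same
  // (object_id, size) parameter compare equal and can be cached.
  return 0;
}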
src/compiler/escape-analysis.cc
@@ -125,6 +125,8 @@ const Alias EscapeStatusAnalysis::kNotReachable =
 const Alias EscapeStatusAnalysis::kUntrackable =
     std::numeric_limits<Alias>::max() - 1;

+namespace impl {
+
 class VirtualObject : public ZoneObject {
  public:
   enum Status {
@@ -566,6 +568,9 @@ bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
   return changed;
 }

+}  // namespace impl
+using namespace impl;
+
 EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
                                            Graph* graph, Zone* zone)
     : stack_(zone),
@@ -1684,8 +1689,8 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
   }
   int input_count = static_cast<int>(cache_->fields().size());
   Node* new_object_state =
-      graph()->NewNode(common()->ObjectState(input_count), input_count,
-                       &cache_->fields().front());
+      graph()->NewNode(common()->ObjectState(vobj->id(), input_count),
+                       input_count, &cache_->fields().front());
   NodeProperties::SetType(new_object_state, Type::OtherInternal());
   vobj->SetObjectState(new_object_state);
   TRACE(
src/compiler/escape-analysis.h
@@ -15,9 +15,11 @@ namespace compiler {
 // Forward declarations.
 class CommonOperatorBuilder;
 class EscapeStatusAnalysis;
+namespace impl {
 class MergeCache;
 class VirtualState;
 class VirtualObject;
+};  // namespace impl

 // EscapeObjectAnalysis simulates stores to determine values of loads if
 // an object is virtual and eliminated.
@@ -55,17 +57,19 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
   bool ProcessEffectPhi(Node* node);

   void ForwardVirtualState(Node* node);
-  VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
-  VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
-                                       Node* node);
+  impl::VirtualState* CopyForModificationAt(impl::VirtualState* state,
+                                            Node* node);
+  impl::VirtualObject* CopyForModificationAt(impl::VirtualObject* obj,
+                                             impl::VirtualState* state,
+                                             Node* node);

   Node* replacement(Node* node);
-  bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
+  bool UpdateReplacement(impl::VirtualState* state, Node* node, Node* rep);

-  VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
+  impl::VirtualObject* GetVirtualObject(impl::VirtualState* state, Node* node);

   void DebugPrint();
-  void DebugPrintState(VirtualState* state);
+  void DebugPrintState(impl::VirtualState* state);

   Graph* graph() const;
   Zone* zone() const { return zone_; }
@@ -75,10 +79,10 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
   Node* const slot_not_analyzed_;
   CommonOperatorBuilder* const common_;
   EscapeStatusAnalysis* status_analysis_;
-  ZoneVector<VirtualState*> virtual_states_;
+  ZoneVector<impl::VirtualState*> virtual_states_;
   ZoneVector<Node*> replacements_;
-  ZoneSet<VirtualObject*> cycle_detection_;
-  MergeCache* cache_;
+  ZoneSet<impl::VirtualObject*> cycle_detection_;
+  impl::MergeCache* cache_;

   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
 };
src/compiler/instruction-selector.cc
@@ -482,21 +482,39 @@ class StateObjectDeduplicator {
   static const size_t kNotDuplicated = SIZE_MAX;

   size_t GetObjectId(Node* node) {
+    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
+           node->opcode() == IrOpcode::kObjectId ||
+           node->opcode() == IrOpcode::kArgumentsElementsState);
     for (size_t i = 0; i < objects_.size(); ++i) {
-      if (objects_[i] == node) {
+      if (objects_[i] == node) return i;
+      // ObjectId nodes are the Turbofan way to express objects with the same
+      // identity in the deopt info. So they should always be mapped to
+      // previously appearing TypedObjectState nodes.
+      if (HasObjectId(objects_[i]) && HasObjectId(node) &&
+          ObjectIdOf(objects_[i]->op()) == ObjectIdOf(node->op())) {
         return i;
       }
     }
-    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
-           node->opcode() == IrOpcode::kArgumentsElementsState);
     return kNotDuplicated;
   }

   size_t InsertObject(Node* node) {
+    DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
+           node->opcode() == IrOpcode::kObjectId ||
+           node->opcode() == IrOpcode::kArgumentsElementsState);
     size_t id = objects_.size();
     objects_.push_back(node);
     return id;
   }

  private:
+  static bool HasObjectId(Node* node) {
+    return node->opcode() == IrOpcode::kTypedObjectState ||
+           node->opcode() == IrOpcode::kObjectId;
+  }
+
   ZoneVector<Node*> objects_;
 };

@@ -527,9 +545,11 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
     case IrOpcode::kObjectState: {
       UNREACHABLE();
     }
-    case IrOpcode::kTypedObjectState: {
+    case IrOpcode::kTypedObjectState:
+    case IrOpcode::kObjectId: {
       size_t id = deduplicator->GetObjectId(input);
       if (id == StateObjectDeduplicator::kNotDuplicated) {
+        DCHECK(input->opcode() == IrOpcode::kTypedObjectState);
         size_t entries = 0;
         id = deduplicator->InsertObject(input);
         StateValueList* nested = values->PushRecursiveField(zone, id);
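Aside, not part of the diff: the deduplication rule above is "same node pointer, or same escape-analysis object id, means the same deopt object". A self-contained sketch of that lookup, with a plain struct standing in for TurboFan nodes (Node, ObjectIdOf and the opcode checks are V8-internal):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct FakeNode {
  bool has_object_id;
  uint32_t object_id;  // only meaningful if has_object_id is true
};

constexpr size_t kNotDuplicated = SIZE_MAX;

size_t GetObjectId(const std::vector<const FakeNode*>& objects,
                   const FakeNode* node) {
  for (size_t i = 0; i < objects.size(); ++i) {
    if (objects[i] == node) return i;  // identical node
    // Distinct nodes carrying the same object id still denote one object.
    if (objects[i]->has_object_id && node->has_object_id &&
        objects[i]->object_id == node->object_id) {
      return i;
    }
  }
  return kNotDuplicated;
}

int main() {
  FakeNode typed_state{true, 5};  // like a TypedObjectState with id 5
  FakeNode object_id{true, 5};    // like an ObjectId node for the same object
  std::vector<const FakeNode*> objects{&typed_state};
  std::cout << GetObjectId(objects, &object_id) << "\n";  // prints 0
  return 0;
}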
src/compiler/new-escape-analysis-reducer.cc (new file, 397 lines)
@@ -0,0 +1,397 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/new-escape-analysis-reducer.h"

#include "src/compiler/all-nodes.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"

namespace v8 {
namespace internal {
namespace compiler {

#ifdef DEBUG
#define TRACE(...)                                    \
  do {                                                \
    if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
  } while (false)
#else
#define TRACE(...)
#endif  // DEBUG

NewEscapeAnalysisReducer::NewEscapeAnalysisReducer(
    Editor* editor, JSGraph* jsgraph, EscapeAnalysisResult analysis_result,
    Zone* zone)
    : AdvancedReducer(editor),
      jsgraph_(jsgraph),
      analysis_result_(analysis_result),
      object_id_cache_(zone),
      node_cache_(jsgraph->graph(), zone),
      arguments_elements_(zone),
      zone_(zone) {}

Node* NewEscapeAnalysisReducer::MaybeGuard(Node* original, Node* replacement) {
  // We might need to guard the replacement if the type of the {replacement}
  // node is not in a sub-type relation to the type of the {original} node.
  Type* const replacement_type = NodeProperties::GetType(replacement);
  Type* const original_type = NodeProperties::GetType(original);
  if (!replacement_type->Is(original_type)) {
    Node* const control = NodeProperties::GetControlInput(original);
    replacement = jsgraph()->graph()->NewNode(
        jsgraph()->common()->TypeGuard(original_type), replacement, control);
    NodeProperties::SetType(replacement, original_type);
  }
  return replacement;
}

Node* NewEscapeAnalysisReducer::ObjectIdNode(const VirtualObject* vobject) {
  VirtualObject::Id id = vobject->id();
  if (id >= object_id_cache_.size()) object_id_cache_.resize(id + 1);
  if (!object_id_cache_[id]) {
    Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->ObjectId(id));
    NodeProperties::SetType(node, Type::Object());
    object_id_cache_[id] = node;
  }
  return object_id_cache_[id];
}

Reduction NewEscapeAnalysisReducer::Reduce(Node* node) {
  if (Node* replacement = analysis_result().GetReplacementOf(node)) {
    DCHECK(node->opcode() != IrOpcode::kAllocate &&
           node->opcode() != IrOpcode::kFinishRegion);
    RelaxEffectsAndControls(node);
    if (replacement != jsgraph()->Dead()) {
      replacement = MaybeGuard(node, replacement);
    }
    RelaxEffectsAndControls(node);
    return Replace(replacement);
  }

  switch (node->opcode()) {
    case IrOpcode::kAllocate: {
      const VirtualObject* vobject = analysis_result().GetVirtualObject(node);
      if (vobject && !vobject->HasEscaped()) {
        RelaxEffectsAndControls(node);
      }
      return NoChange();
    }
    case IrOpcode::kFinishRegion: {
      Node* effect = NodeProperties::GetEffectInput(node, 0);
      if (effect->opcode() == IrOpcode::kBeginRegion) {
        RelaxEffectsAndControls(effect);
        RelaxEffectsAndControls(node);
      }
      return NoChange();
    }
    case IrOpcode::kNewUnmappedArgumentsElements:
      arguments_elements_.insert(node);
      return NoChange();
    default: {
      // TODO(sigurds): Change this to GetFrameStateInputCount once
      // it is working. For now we use EffectInputCount > 0 to determine
      // whether a node might have a frame state input.
      if (node->op()->EffectInputCount() > 0) {
        ReduceFrameStateInputs(node);
      }
      return NoChange();
    }
  }
}

// While doing DFS on the FrameState tree, we have to recognize duplicate
// occurrences of virtual objects.
class Deduplicator {
 public:
  explicit Deduplicator(Zone* zone) : is_duplicate_(zone) {}
  bool SeenBefore(const VirtualObject* vobject) {
    VirtualObject::Id id = vobject->id();
    if (id >= is_duplicate_.size()) {
      is_duplicate_.resize(id + 1);
    }
    bool is_duplicate = is_duplicate_[id];
    is_duplicate_[id] = true;
    return is_duplicate;
  }

 private:
  ZoneVector<bool> is_duplicate_;
};

void NewEscapeAnalysisReducer::ReduceFrameStateInputs(Node* node) {
  DCHECK_GE(node->op()->EffectInputCount(), 1);
  for (int i = 0; i < node->InputCount(); ++i) {
    Node* input = node->InputAt(i);
    if (input->opcode() == IrOpcode::kFrameState) {
      Deduplicator deduplicator(zone());
      if (Node* ret = ReduceDeoptState(input, node, &deduplicator)) {
        node->ReplaceInput(i, ret);
      }
    }
  }
}

Node* NewEscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
                                                 Deduplicator* deduplicator) {
  TRACE_FN("ReduceDeoptState", node);
  if (node->opcode() == IrOpcode::kFrameState) {
    NodeHashCache::Constructor new_node(&node_cache_, node);
    // This input order is important to match the DFS traversal used in the
    // instruction selector. Otherwise, the instruction selector might find a
    // duplicate node before the original one.
    for (int input_id : {kFrameStateOuterStateInput, kFrameStateFunctionInput,
                         kFrameStateParametersInput, kFrameStateContextInput,
                         kFrameStateLocalsInput, kFrameStateStackInput}) {
      Node* input = node->InputAt(input_id);
      new_node.ReplaceInput(ReduceDeoptState(input, effect, deduplicator),
                            input_id);
    }
    return new_node.Get();
  } else if (node->opcode() == IrOpcode::kStateValues) {
    NodeHashCache::Constructor new_node(&node_cache_, node);
    for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
      Node* input = NodeProperties::GetValueInput(node, i);
      new_node.ReplaceValueInput(ReduceDeoptState(input, effect, deduplicator),
                                 i);
    }
    return new_node.Get();
  } else if (const VirtualObject* vobject =
                 analysis_result().GetVirtualObject(node)) {
    if (vobject->HasEscaped()) return node;
    if (deduplicator->SeenBefore(vobject)) {
      return ObjectIdNode(vobject);
    } else {
      std::vector<Node*> inputs;
      for (int offset = 0; offset < vobject->size(); offset += kPointerSize) {
        Node* field =
            analysis_result().GetVirtualObjectField(vobject, offset, effect);
        CHECK_NOT_NULL(field);
        if (field != jsgraph()->Dead()) {
          inputs.push_back(ReduceDeoptState(field, effect, deduplicator));
        }
      }
      int num_inputs = static_cast<int>(inputs.size());
      NodeHashCache::Constructor new_node(
          &node_cache_,
          jsgraph()->common()->ObjectState(vobject->id(), num_inputs),
          num_inputs, &inputs.front(), NodeProperties::GetType(node));
      return new_node.Get();
    }
  } else {
    return node;
  }
}

void NewEscapeAnalysisReducer::VerifyReplacement() const {
  AllNodes all(zone(), jsgraph()->graph());
  for (Node* node : all.reachable) {
    if (node->opcode() == IrOpcode::kAllocate) {
      if (const VirtualObject* vobject =
              analysis_result().GetVirtualObject(node)) {
        if (!vobject->HasEscaped()) {
          V8_Fatal(__FILE__, __LINE__,
                   "Escape analysis failed to remove node %s#%d\n",
                   node->op()->mnemonic(), node->id());
        }
      }
    }
  }
}

void NewEscapeAnalysisReducer::Finalize() {
  for (Node* node : arguments_elements_) {
    DCHECK(node->opcode() == IrOpcode::kNewUnmappedArgumentsElements);

    Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
    if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
    Node* arguments_length = NodeProperties::GetValueInput(node, 1);
    if (arguments_length->opcode() != IrOpcode::kArgumentsLength) continue;

    Node* arguments_length_state = nullptr;
    for (Edge edge : arguments_length->use_edges()) {
      Node* use = edge.from();
      switch (use->opcode()) {
        case IrOpcode::kObjectState:
        case IrOpcode::kTypedObjectState:
        case IrOpcode::kStateValues:
        case IrOpcode::kTypedStateValues:
          if (!arguments_length_state) {
            arguments_length_state = jsgraph()->graph()->NewNode(
                jsgraph()->common()->ArgumentsLengthState(
                    IsRestLengthOf(arguments_length->op())));
            NodeProperties::SetType(arguments_length_state,
                                    Type::OtherInternal());
          }
          edge.UpdateTo(arguments_length_state);
          break;
        default:
          break;
      }
    }

    bool escaping_use = false;
    ZoneVector<Node*> loads(zone());
    for (Edge edge : node->use_edges()) {
      Node* use = edge.from();
      if (!NodeProperties::IsValueEdge(edge)) continue;
      if (use->use_edges().empty()) {
        // A node without uses is dead, so we don't have to care about it.
        continue;
      }
      switch (use->opcode()) {
        case IrOpcode::kStateValues:
        case IrOpcode::kTypedStateValues:
        case IrOpcode::kObjectState:
        case IrOpcode::kTypedObjectState:
          break;
        case IrOpcode::kLoadElement:
          loads.push_back(use);
          break;
        case IrOpcode::kLoadField:
          if (FieldAccessOf(use->op()).offset == FixedArray::kLengthOffset) {
            loads.push_back(use);
          } else {
            escaping_use = true;
          }
          break;
        default:
          // If the arguments elements node is used by an unhandled node,
          // then we cannot remove this allocation.
          escaping_use = true;
          break;
      }
      if (escaping_use) break;
    }
    if (!escaping_use) {
      Node* arguments_elements_state = jsgraph()->graph()->NewNode(
          jsgraph()->common()->ArgumentsElementsState(
              IsRestLengthOf(arguments_length->op())));
      NodeProperties::SetType(arguments_elements_state, Type::OtherInternal());
      ReplaceWithValue(node, arguments_elements_state);

      ElementAccess stack_access;
      stack_access.base_is_tagged = BaseTaggedness::kUntaggedBase;
      // Reduce base address by {kPointerSize} such that (length - index)
      // resolves to the right position.
      stack_access.header_size =
          CommonFrameConstants::kFixedFrameSizeAboveFp - kPointerSize;
      stack_access.type = Type::NonInternal();
      stack_access.machine_type = MachineType::AnyTagged();
      stack_access.write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
      const Operator* load_stack_op =
          jsgraph()->simplified()->LoadElement(stack_access);

      for (Node* load : loads) {
        switch (load->opcode()) {
          case IrOpcode::kLoadElement: {
            Node* index = NodeProperties::GetValueInput(load, 1);
            // {offset} is a reverted index starting from 1. The base address
            // is adapted to allow offsets starting from 1.
            Node* offset = jsgraph()->graph()->NewNode(
                jsgraph()->simplified()->NumberSubtract(), arguments_length,
                index);
            NodeProperties::SetType(offset,
                                    TypeCache::Get().kArgumentsLengthType);
            NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
            NodeProperties::ReplaceValueInput(load, offset, 1);
            NodeProperties::ChangeOp(load, load_stack_op);
            break;
          }
          case IrOpcode::kLoadField: {
            DCHECK_EQ(FieldAccessOf(load->op()).offset,
                      FixedArray::kLengthOffset);
            Node* length = NodeProperties::GetValueInput(node, 1);
            ReplaceWithValue(load, length);
            break;
          }
          default:
            UNREACHABLE();
        }
      }
    }
  }
}

Node* NodeHashCache::Query(Node* node) {
  auto it = cache_.find(node);
  if (it != cache_.end()) {
    return *it;
  } else {
    return nullptr;
  }
}

NodeHashCache::Constructor::Constructor(NodeHashCache* cache,
                                        const Operator* op, int input_count,
                                        Node** inputs, Type* type)
    : node_cache_(cache), from_(nullptr) {
  if (node_cache_->temp_nodes_.size() > 0) {
    tmp_ = node_cache_->temp_nodes_.back();
    node_cache_->temp_nodes_.pop_back();
    int tmp_input_count = tmp_->InputCount();
    if (input_count <= tmp_input_count) {
      tmp_->TrimInputCount(input_count);
    }
    for (int i = 0; i < input_count; ++i) {
      if (i < tmp_input_count) {
        tmp_->ReplaceInput(i, inputs[i]);
      } else {
        tmp_->AppendInput(node_cache_->graph_->zone(), inputs[i]);
      }
    }
    NodeProperties::ChangeOp(tmp_, op);
  } else {
    tmp_ = node_cache_->graph_->NewNode(op, input_count, inputs);
  }
  NodeProperties::SetType(tmp_, type);
}

Node* NodeHashCache::Constructor::Get() {
  DCHECK(tmp_ || from_);
  Node* node;
  if (!tmp_) {
    node = node_cache_->Query(from_);
    if (!node) node = from_;
  } else {
    node = node_cache_->Query(tmp_);
    if (node) {
      node_cache_->temp_nodes_.push_back(tmp_);
    } else {
      node = tmp_;
      node_cache_->Insert(node);
    }
  }
  tmp_ = from_ = nullptr;
  return node;
}

Node* NodeHashCache::Constructor::MutableNode() {
  DCHECK(tmp_ || from_);
  if (!tmp_) {
    if (node_cache_->temp_nodes_.empty()) {
      tmp_ = node_cache_->graph_->CloneNode(from_);
    } else {
      tmp_ = node_cache_->temp_nodes_.back();
      node_cache_->temp_nodes_.pop_back();
      int from_input_count = from_->InputCount();
      int tmp_input_count = tmp_->InputCount();
      if (from_input_count <= tmp_input_count) {
        tmp_->TrimInputCount(from_input_count);
      }
      for (int i = 0; i < from_input_count; ++i) {
        if (i < tmp_input_count) {
          tmp_->ReplaceInput(i, from_->InputAt(i));
        } else {
          tmp_->AppendInput(node_cache_->graph_->zone(), from_->InputAt(i));
        }
      }
      NodeProperties::SetType(tmp_, NodeProperties::GetType(from_));
      NodeProperties::ChangeOp(tmp_, from_->op());
    }
  }
  return tmp_;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
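Aside, not part of the diff: the address arithmetic in Finalize() above works because argument i sits (length - i) slots above the fixed frame part, and the header is lowered by one pointer size so the reversed index may start at 1. A self-contained check of that arithmetic; kPointerSize = 8 and kFixedFrameSizeAboveFp = 16 are assumed example values, not the real platform constants:

#include <cassert>

int main() {
  const int kPointerSize = 8;
  const int kFixedFrameSizeAboveFp = 16;  // assumed example value
  // header_size as set up in Finalize(): one slot below the fixed frame part.
  const int header_size = kFixedFrameSizeAboveFp - kPointerSize;
  const int length = 3;  // three stack-passed arguments
  for (int index = 0; index < length; ++index) {
    int offset = length - index;  // the NumberSubtract the reducer inserts
    int address = header_size + offset * kPointerSize;
    // Argument 0 is the highest slot; consecutive indices step down one
    // pointer slot each, all landing above the fixed frame part.
    assert(address ==
           kFixedFrameSizeAboveFp + (length - 1 - index) * kPointerSize);
  }
  return 0;
}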
src/compiler/new-escape-analysis-reducer.h (new file, 122 lines)
@@ -0,0 +1,122 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_NEW_ESCAPE_ANALYSIS_REDUCER_H_
#define V8_COMPILER_NEW_ESCAPE_ANALYSIS_REDUCER_H_

#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/new-escape-analysis.h"
#include "src/globals.h"

namespace v8 {
namespace internal {
namespace compiler {

class Deduplicator;
class JSGraph;

// Perform hash-consing when creating or mutating nodes. Used to avoid
// duplicate nodes when creating ObjectState, StateValues and FrameState nodes.
class NodeHashCache {
 public:
  NodeHashCache(Graph* graph, Zone* zone)
      : graph_(graph), cache_(zone), temp_nodes_(zone) {}

  // Handle to a conceptually new mutable node. Tries to re-use existing nodes
  // and to recycle memory if possible.
  class Constructor {
   public:
    // Construct a new node as a clone of [from].
    Constructor(NodeHashCache* cache, Node* from)
        : node_cache_(cache), from_(from), tmp_(nullptr) {}
    // Construct a new node from scratch.
    Constructor(NodeHashCache* cache, const Operator* op, int input_count,
                Node** inputs, Type* type);

    // Modify the new node.
    void ReplaceValueInput(Node* input, int i) {
      if (!tmp_ && input == NodeProperties::GetValueInput(from_, i)) return;
      Node* node = MutableNode();
      NodeProperties::ReplaceValueInput(node, input, i);
    }
    void ReplaceInput(Node* input, int i) {
      if (!tmp_ && input == from_->InputAt(i)) return;
      Node* node = MutableNode();
      node->ReplaceInput(i, input);
    }

    // Obtain the mutated node or a cached copy. Invalidates the [Constructor].
    Node* Get();

   private:
    Node* MutableNode();

    NodeHashCache* node_cache_;
    // Original node, copied on write.
    Node* from_;
    // Temporary node used for mutations, can be recycled if cache is hit.
    Node* tmp_;
  };

 private:
  Node* Query(Node* node);
  void Insert(Node* node) { cache_.insert(node); }

  Graph* graph_;
  struct NodeEquals {
    bool operator()(Node* a, Node* b) const {
      return NodeProperties::Equals(a, b);
    }
  };
  struct NodeHashCode {
    size_t operator()(Node* n) const { return NodeProperties::HashCode(n); }
  };
  ZoneUnorderedSet<Node*, NodeHashCode, NodeEquals> cache_;
  // Unused nodes whose memory can be recycled.
  ZoneVector<Node*> temp_nodes_;
};

// Modify the graph according to the information computed in the previous
// phase.
class V8_EXPORT_PRIVATE NewEscapeAnalysisReducer final
    : public NON_EXPORTED_BASE(AdvancedReducer) {
 public:
  NewEscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
                           EscapeAnalysisResult analysis_result, Zone* zone);

  Reduction Reduce(Node* node) override;
  const char* reducer_name() const override {
    return "NewEscapeAnalysisReducer";
  }
  void Finalize() override;

  // Verifies that all virtual allocation nodes have been dealt with. Run it
  // after this reducer has been applied.
  void VerifyReplacement() const;

 private:
  void ReduceFrameStateInputs(Node* node);
  Node* ReduceDeoptState(Node* node, Node* effect, Deduplicator* deduplicator);
  Node* ObjectIdNode(const VirtualObject* vobject);
  Node* MaybeGuard(Node* original, Node* replacement);

  JSGraph* jsgraph() const { return jsgraph_; }
  EscapeAnalysisResult analysis_result() const { return analysis_result_; }
  Zone* zone() const { return zone_; }

  JSGraph* const jsgraph_;
  EscapeAnalysisResult analysis_result_;
  ZoneVector<Node*> object_id_cache_;
  NodeHashCache node_cache_;
  ZoneSet<Node*> arguments_elements_;
  Zone* const zone_;

  DISALLOW_COPY_AND_ASSIGN(NewEscapeAnalysisReducer);
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_NEW_ESCAPE_ANALYSIS_REDUCER_H_
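Aside, not part of the diff: NodeHashCache above is plain hash-consing, i.e. an unordered set keyed by structural equality so that an already-existing equal node is returned instead of a fresh one. A self-contained sketch of the idea with a toy node type (V8 keys on NodeProperties::HashCode/Equals over operator and inputs):

#include <cstddef>
#include <functional>
#include <iostream>
#include <unordered_set>
#include <vector>

struct ToyNode {
  int opcode;
  std::vector<const ToyNode*> inputs;
};

struct ToyNodeHash {
  size_t operator()(const ToyNode* n) const {
    size_t h = std::hash<int>()(n->opcode);
    for (const ToyNode* in : n->inputs)
      h = h * 31 + std::hash<const void*>()(in);
    return h;
  }
};
struct ToyNodeEquals {
  bool operator()(const ToyNode* a, const ToyNode* b) const {
    return a->opcode == b->opcode && a->inputs == b->inputs;
  }
};

int main() {
  std::unordered_set<const ToyNode*, ToyNodeHash, ToyNodeEquals> cache;
  ToyNode a{1, {}}, b{2, {&a}}, c{2, {&a}};  // b and c are structurally equal
  cache.insert(&a);
  cache.insert(&b);
  auto it = cache.find(&c);          // hits the cached b; no duplicate needed
  std::cout << (*it == &b) << "\n";  // prints 1
  return 0;
}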
src/compiler/new-escape-analysis.cc (new file, 699 lines)
@@ -0,0 +1,699 @@
// Copyright 2017 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/compiler/new-escape-analysis.h"
|
||||
|
||||
#include "src/bootstrapper.h"
|
||||
#include "src/compiler/linkage.h"
|
||||
#include "src/compiler/node-matchers.h"
|
||||
#include "src/compiler/operator-properties.h"
|
||||
#include "src/compiler/simplified-operator.h"
|
||||
#include "src/objects-inl.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
namespace compiler {
|
||||
|
||||
#ifdef DEBUG
|
||||
thread_local int TraceScope::depth = 0;
|
||||
#endif
|
||||
|
||||
template <class T>
|
||||
class Sidetable {
|
||||
public:
|
||||
explicit Sidetable(Zone* zone) : map_(zone) {}
|
||||
T& operator[](const Node* node) {
|
||||
NodeId id = node->id();
|
||||
if (id >= map_.size()) {
|
||||
map_.resize(id + 1);
|
||||
}
|
||||
return map_[id];
|
||||
}
|
||||
|
||||
private:
|
||||
ZoneVector<T> map_;
|
||||
};
|
||||
|
||||
template <class T>
|
||||
class SparseSidetable {
|
||||
public:
|
||||
explicit SparseSidetable(Zone* zone, T def_value = T())
|
||||
: def_value_(std::move(def_value)), map_(zone) {}
|
||||
T& operator[](const Node* node) {
|
||||
return map_.insert(std::make_pair(node->id(), def_value_)).first->second;
|
||||
}
|
||||
|
||||
private:
|
||||
T def_value_;
|
||||
ZoneUnorderedMap<NodeId, T> map_;
|
||||
};
|
||||
|
||||
// Keeps track of the changes to the current node during reduction.
|
||||
// Encapsulates the current state of the IR graph and the reducer state like
|
||||
// side-tables. All access to the IR and the reducer state should happen through
|
||||
// a ReduceScope to ensure that changes and dependencies are tracked and all
|
||||
// necessary node revisitations happen.
|
||||
class ReduceScope {
|
||||
public:
|
||||
typedef EffectGraphReducer::Reduction Reduction;
|
||||
explicit ReduceScope(Node* node, Reduction* reduction)
|
||||
: current_node_(node), reduction_(reduction) {}
|
||||
|
||||
protected:
|
||||
Node* current_node() const { return current_node_; }
|
||||
Reduction* reduction() { return reduction_; }
|
||||
|
||||
private:
|
||||
Node* current_node_;
|
||||
Reduction* reduction_;
|
||||
};
|
||||
|
||||
// A VariableTracker object keeps track of the values of variables at all points
|
||||
// of the effect chain and introduces new phi nodes when necessary.
|
||||
// Initially and by default, variables are mapped to nullptr, which means that
|
||||
// the variable allocation point does not dominate the current point on the
|
||||
// effect chain. We map variables that represent uninitialized memory to the
|
||||
// Dead node to ensure it is not read.
|
||||
// Unmapped values are impossible by construction, it is indistinguishable if a
|
||||
// PersistentMap does not contain an element or maps it to the default element.
|
||||
class VariableTracker {
|
||||
private:
|
||||
// The state of all variables at one point in the effect chain.
|
||||
class State {
|
||||
typedef PersistentMap<Variable, Node*> Map;
|
||||
|
||||
public:
|
||||
explicit State(Zone* zone) : map_(zone) {}
|
||||
Node* Get(Variable var) const {
|
||||
CHECK(var != Variable::Invalid());
|
||||
return map_.Get(var);
|
||||
}
|
||||
void Set(Variable var, Node* node) {
|
||||
CHECK(var != Variable::Invalid());
|
||||
return map_.Set(var, node);
|
||||
}
|
||||
Map::iterator begin() const { return map_.begin(); }
|
||||
Map::iterator end() const { return map_.end(); }
|
||||
bool operator!=(const State& other) const { return map_ != other.map_; }
|
||||
|
||||
private:
|
||||
Map map_;
|
||||
};
|
||||
|
||||
public:
|
||||
VariableTracker(JSGraph* graph, EffectGraphReducer* reducer, Zone* zone);
|
||||
Variable NewVariable() { return Variable(next_variable_++); }
|
||||
Node* Get(Variable var, Node* effect) { return table_[effect].Get(var); }
|
||||
Zone* zone() { return zone_; }
|
||||
|
||||
class Scope : public ReduceScope {
|
||||
public:
|
||||
Scope(VariableTracker* tracker, Node* node, Reduction* reduction);
|
||||
~Scope();
|
||||
Node* Get(Variable var) { return current_state_.Get(var); }
|
||||
void Set(Variable var, Node* node) { current_state_.Set(var, node); }
|
||||
|
||||
private:
|
||||
VariableTracker* states_;
|
||||
State current_state_;
|
||||
};
|
||||
|
||||
private:
|
||||
State MergeInputs(Node* effect_phi);
|
||||
Zone* zone_;
|
||||
JSGraph* graph_;
|
||||
SparseSidetable<State> table_;
|
||||
ZoneVector<Node*> buffer_;
|
||||
EffectGraphReducer* reducer_;
|
||||
int next_variable_ = 0;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(VariableTracker);
|
||||
};
|
||||
|
||||
// Encapsulates the current state of the escape analysis reducer to preserve
|
||||
// invariants regarding changes and re-visitation.
|
||||
class EscapeAnalysisTracker : public ZoneObject {
|
||||
public:
|
||||
EscapeAnalysisTracker(JSGraph* jsgraph, EffectGraphReducer* reducer,
|
||||
Zone* zone)
|
||||
: virtual_objects_(zone),
|
||||
replacements_(zone),
|
||||
variable_states_(jsgraph, reducer, zone),
|
||||
jsgraph_(jsgraph),
|
||||
zone_(zone) {}
|
||||
|
||||
class Scope : public VariableTracker::Scope {
|
||||
public:
|
||||
Scope(EffectGraphReducer* reducer, EscapeAnalysisTracker* tracker,
|
||||
Node* node, Reduction* reduction)
|
||||
: VariableTracker::Scope(&tracker->variable_states_, node, reduction),
|
||||
tracker_(tracker),
|
||||
reducer_(reducer) {}
|
||||
const VirtualObject* GetVirtualObject(Node* node) {
|
||||
VirtualObject* vobject = tracker_->virtual_objects_[node];
|
||||
if (vobject) vobject->AddDependency(current_node());
|
||||
return vobject;
|
||||
}
|
||||
// Create or retrieve a virtual object for the current node.
|
||||
const VirtualObject* InitVirtualObject(int size) {
|
||||
DCHECK(current_node()->opcode() == IrOpcode::kAllocate);
|
||||
VirtualObject* vobject = tracker_->virtual_objects_[current_node()];
|
||||
if (vobject) {
|
||||
CHECK(vobject->size() == size);
|
||||
} else {
|
||||
vobject = tracker_->NewVirtualObject(size);
|
||||
}
|
||||
if (vobject) vobject->AddDependency(current_node());
|
||||
vobject_ = vobject;
|
||||
return vobject;
|
||||
}
|
||||
|
||||
void SetVirtualObject(Node* object) {
|
||||
vobject_ = tracker_->virtual_objects_[object];
|
||||
}
|
||||
|
||||
void SetEscaped(Node* node) {
|
||||
if (VirtualObject* object = tracker_->virtual_objects_[node]) {
|
||||
if (object->HasEscaped()) return;
|
||||
TRACE("Setting %s#%d to escaped because of use by %s#%d\n",
|
||||
node->op()->mnemonic(), node->id(),
|
||||
current_node()->op()->mnemonic(), current_node()->id());
|
||||
object->SetEscaped();
|
||||
object->RevisitDependants(reducer_);
|
||||
}
|
||||
}
|
||||
// The inputs of the current node have to be accessed through the scope to
|
||||
// ensure that they respect the node replacements.
|
||||
Node* ValueInput(int i) {
|
||||
return tracker_->ResolveReplacement(
|
||||
NodeProperties::GetValueInput(current_node(), i));
|
||||
}
|
||||
Node* ContextInput() {
|
||||
return tracker_->ResolveReplacement(
|
||||
NodeProperties::GetContextInput(current_node()));
|
||||
}
|
||||
|
||||
void SetReplacement(Node* replacement) {
|
||||
replacement_ = replacement;
|
||||
vobject_ =
|
||||
replacement ? tracker_->virtual_objects_[replacement] : nullptr;
|
||||
TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
|
||||
replacement->id());
|
||||
}
|
||||
|
||||
void MarkForDeletion() { SetReplacement(tracker_->jsgraph_->Dead()); }
|
||||
|
||||
~Scope() {
|
||||
if (replacement_ != tracker_->replacements_[current_node()] ||
|
||||
vobject_ != tracker_->virtual_objects_[current_node()]) {
|
||||
reduction()->set_value_changed();
|
||||
}
|
||||
tracker_->replacements_[current_node()] = replacement_;
|
||||
tracker_->virtual_objects_[current_node()] = vobject_;
|
||||
}
|
||||
|
||||
private:
|
||||
EscapeAnalysisTracker* tracker_;
|
||||
EffectGraphReducer* reducer_;
|
||||
VirtualObject* vobject_ = nullptr;
|
||||
Node* replacement_ = nullptr;
|
||||
};
|
||||
|
||||
Node* GetReplacementOf(Node* node) { return replacements_[node]; }
|
||||
Node* ResolveReplacement(Node* node) {
|
||||
if (Node* replacement = GetReplacementOf(node)) {
|
||||
// Replacements cannot have replacements. This is important to ensure
|
||||
// re-visitation: If a replacement is replaced, then all nodes accessing
|
||||
// the replacement have to be updated.
|
||||
DCHECK_NULL(GetReplacementOf(replacement));
|
||||
return replacement;
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
private:
|
||||
friend class EscapeAnalysisResult;
|
||||
static const size_t kMaxTrackedObjects = 100;
|
||||
|
||||
VirtualObject* NewVirtualObject(int size) {
|
||||
if (next_object_id_ >= kMaxTrackedObjects) return nullptr;
|
||||
return new (zone_)
|
||||
VirtualObject(&variable_states_, next_object_id_++, size);
|
||||
}
|
||||
|
||||
SparseSidetable<VirtualObject*> virtual_objects_;
|
||||
Sidetable<Node*> replacements_;
|
||||
VariableTracker variable_states_;
|
||||
VirtualObject::Id next_object_id_ = 0;
|
||||
JSGraph* const jsgraph_;
|
||||
Zone* const zone_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisTracker);
|
||||
};
|
||||
|
||||
EffectGraphReducer::EffectGraphReducer(
|
||||
Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
|
||||
: graph_(graph),
|
||||
state_(graph, kNumStates),
|
||||
revisit_(zone),
|
||||
stack_(zone),
|
||||
reduce_(reduce) {}
|
||||
|
||||
void EffectGraphReducer::ReduceFrom(Node* node) {
|
||||
// Perform DFS and eagerly trigger revisitation as soon as possible.
|
||||
// A stack element {node, i} indicates that input i of node should be visited
|
||||
// next.
|
||||
DCHECK(stack_.empty());
|
||||
stack_.push({node, 0});
|
||||
while (!stack_.empty()) {
|
||||
Node* current = stack_.top().node;
|
||||
int& input_index = stack_.top().input_index;
|
||||
if (input_index < current->InputCount()) {
|
||||
Node* input = current->InputAt(input_index);
|
||||
input_index++;
|
||||
switch (state_.Get(input)) {
|
||||
case State::kVisited:
|
||||
// The input is already reduced.
|
||||
break;
|
||||
case State::kOnStack:
|
||||
// The input is on the DFS stack right now, so it will be revisited
|
||||
// later anyway.
|
||||
break;
|
||||
case State::kUnvisited:
|
||||
case State::kRevisit: {
|
||||
state_.Set(input, State::kOnStack);
|
||||
stack_.push({input, 0});
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
stack_.pop();
|
||||
Reduction reduction;
|
||||
reduce_(current, &reduction);
|
||||
for (Edge edge : current->use_edges()) {
|
||||
// Mark uses for revisitation.
|
||||
Node* use = edge.from();
|
||||
if (NodeProperties::IsEffectEdge(edge)) {
|
||||
if (reduction.effect_changed()) Revisit(use);
|
||||
} else {
|
||||
if (reduction.value_changed()) Revisit(use);
|
||||
}
|
||||
}
|
||||
state_.Set(current, State::kVisited);
|
||||
// Process the revisitation buffer immediately. This improves performance
|
||||
// of escape analysis. Using a stack for {revisit_} reverses the order in
|
||||
// which the revisitation happens. This also seems to improve performance.
|
||||
while (!revisit_.empty()) {
|
||||
Node* revisit = revisit_.top();
|
||||
if (state_.Get(revisit) == State::kRevisit) {
|
||||
state_.Set(revisit, State::kOnStack);
|
||||
stack_.push({revisit, 0});
|
||||
}
|
||||
revisit_.pop();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void EffectGraphReducer::Revisit(Node* node) {
|
||||
if (state_.Get(node) == State::kVisited) {
|
||||
TRACE(" Queueing for revisit: %s#%d\n", node->op()->mnemonic(),
|
||||
node->id());
|
||||
state_.Set(node, State::kRevisit);
|
||||
revisit_.push(node);
|
||||
}
|
||||
}
|
||||
|
||||
VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
|
||||
Zone* zone)
|
||||
: zone_(zone),
|
||||
graph_(graph),
|
||||
table_(zone, State(zone)),
|
||||
buffer_(zone),
|
||||
reducer_(reducer) {}
|
||||
|
||||
VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
|
||||
Reduction* reduction)
|
||||
: ReduceScope(node, reduction),
|
||||
states_(states),
|
||||
current_state_(states->zone_) {
|
||||
switch (node->opcode()) {
|
||||
case IrOpcode::kEffectPhi:
|
||||
current_state_ = states_->MergeInputs(node);
|
||||
break;
|
||||
default:
|
||||
int effect_inputs = node->op()->EffectInputCount();
|
||||
if (effect_inputs == 1) {
|
||||
current_state_ =
|
||||
states_->table_[NodeProperties::GetEffectInput(node, 0)];
|
||||
} else {
|
||||
DCHECK_EQ(0, effect_inputs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
VariableTracker::Scope::~Scope() {
|
||||
if (!reduction()->effect_changed() &&
|
||||
states_->table_[current_node()] != current_state_) {
|
||||
reduction()->set_effect_changed();
|
||||
}
|
||||
states_->table_[current_node()] = current_state_;
|
||||
}
|
||||
|
||||
VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
|
||||
// A variable that is mapped to [nullptr] was not assigned a value on every
|
||||
// execution path to the current effect phi. Relying on the invariant that
|
||||
// every variable is initialized (at least with a sentinel like the Dead
|
||||
// node), this means that the variable initialization does not dominate the
|
||||
// current point. So for loop effect phis, we can keep nullptr for a variable
|
||||
// as long as the first input of the loop has nullptr for this variable. For
|
||||
// non-loop effect phis, we can even keep it nullptr as long as any input has
|
||||
// nullptr.
|
||||
DCHECK(effect_phi->opcode() == IrOpcode::kEffectPhi);
|
||||
int arity = effect_phi->op()->EffectInputCount();
|
||||
Node* control = NodeProperties::GetControlInput(effect_phi, 0);
|
||||
TRACE("control: %s#%d\n", control->op()->mnemonic(), control->id());
|
||||
bool is_loop = control->opcode() == IrOpcode::kLoop;
|
||||
buffer_.reserve(arity + 1);
|
||||
|
||||
State first_input = table_[NodeProperties::GetEffectInput(effect_phi, 0)];
|
||||
State result = first_input;
|
||||
for (std::pair<Variable, Node*> var_value : first_input) {
|
||||
if (Node* value = var_value.second) {
|
||||
Variable var = var_value.first;
|
||||
TRACE("var %i:\n", var.id_);
|
||||
buffer_.clear();
|
||||
buffer_.push_back(value);
|
||||
bool identical_inputs = true;
|
||||
int num_defined_inputs = 1;
|
||||
TRACE(" input 0: %s#%d\n", value->op()->mnemonic(), value->id());
|
||||
for (int i = 1; i < arity; ++i) {
|
||||
Node* next_value =
|
||||
table_[NodeProperties::GetEffectInput(effect_phi, i)].Get(var);
|
||||
if (next_value != value) identical_inputs = false;
|
||||
if (next_value != nullptr) {
|
||||
num_defined_inputs++;
|
||||
TRACE(" input %i: %s#%d\n", i, next_value->op()->mnemonic(),
|
||||
next_value->id());
|
||||
} else {
|
||||
TRACE(" input %i: nullptr\n", i);
|
||||
}
|
||||
buffer_.push_back(next_value);
|
||||
}
|
||||
|
||||
Node* old_value = table_[effect_phi].Get(var);
|
||||
if (old_value) {
|
||||
TRACE(" old: %s#%d\n", old_value->op()->mnemonic(), old_value->id());
|
||||
} else {
|
||||
TRACE(" old: nullptr\n");
|
||||
}
|
||||
// Reuse a previously created phi node if possible.
|
||||
if (old_value && old_value->opcode() == IrOpcode::kPhi &&
|
||||
NodeProperties::GetControlInput(old_value, 0) == control) {
|
||||
// Since a phi node can never dominate its control node,
|
||||
// [old_value] cannot originate from the inputs. Thus [old_value]
|
||||
// must have been created by a previous reduction of this [effect_phi].
|
||||
for (int i = 0; i < arity; ++i) {
|
||||
NodeProperties::ReplaceValueInput(
|
||||
old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
|
||||
// This change cannot affect the rest of the reducer, so there is no
|
||||
// need to trigger additional revisitations.
|
||||
}
|
||||
result.Set(var, old_value);
|
||||
} else {
|
||||
if (num_defined_inputs == 1 && is_loop) {
|
||||
// For loop effect phis, the variable initialization dominates iff it
|
||||
// dominates the first input.
|
||||
DCHECK_EQ(2, arity);
|
||||
DCHECK_EQ(value, buffer_[0]);
|
||||
result.Set(var, value);
|
||||
} else if (num_defined_inputs < arity) {
|
||||
// If the variable is undefined on some input of this non-loop effect
|
||||
// phi, then its initialization does not dominate this point.
|
||||
result.Set(var, nullptr);
|
||||
} else {
|
||||
DCHECK_EQ(num_defined_inputs, arity);
|
||||
// We only create a phi if the values are different.
|
||||
if (identical_inputs) {
|
||||
result.Set(var, value);
|
||||
} else {
|
||||
TRACE("Creating new phi\n");
|
||||
buffer_.push_back(control);
|
||||
Node* phi = graph_->graph()->NewNode(
|
||||
graph_->common()->Phi(MachineRepresentation::kTagged, arity),
|
||||
arity + 1, &buffer_.front());
|
||||
// TODO(tebbi): Computing precise types here is tricky, because of
|
||||
// the necessary revisitations. If we really need this, we should
|
||||
// probably do it afterwards.
|
||||
NodeProperties::SetType(phi, Type::Any());
|
||||
reducer_->AddRoot(phi);
|
||||
result.Set(var, phi);
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef DEBUG
|
||||
if (Node* result_node = result.Get(var)) {
|
||||
TRACE(" result: %s#%d\n", result_node->op()->mnemonic(),
|
||||
result_node->id());
|
||||
} else {
|
||||
TRACE(" result: nullptr\n");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
int OffsetOfFieldAccess(const Operator* op) {
|
||||
DCHECK(op->opcode() == IrOpcode::kLoadField ||
|
||||
op->opcode() == IrOpcode::kStoreField);
|
||||
FieldAccess access = FieldAccessOf(op);
|
||||
return access.offset;
|
||||
}
|
||||
|
||||
Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
|
||||
DCHECK(op->opcode() == IrOpcode::kLoadElement ||
|
||||
op->opcode() == IrOpcode::kStoreElement);
|
||||
Type* index_type = NodeProperties::GetType(index_node);
|
||||
if (!index_type->Is(Type::Number())) return Nothing<int>();
|
||||
double max = index_type->Max();
|
||||
double min = index_type->Min();
|
||||
int index = static_cast<int>(min);
|
||||
if (!(index == min && index == max)) return Nothing<int>();
|
||||
ElementAccess access = ElementAccessOf(op);
|
||||
DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
|
||||
kPointerSizeLog2);
|
||||
return Just(access.header_size + (index << ElementSizeLog2Of(
|
||||
access.machine_type.representation())));
|
||||
}
|
||||
|
||||
void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
|
||||
JSGraph* jsgraph) {
|
||||
switch (op->opcode()) {
|
||||
case IrOpcode::kAllocate: {
|
||||
NumberMatcher size(current->ValueInput(0));
|
||||
if (!size.HasValue()) break;
|
||||
int size_int = static_cast<int>(size.Value());
|
||||
if (size_int != size.Value()) break;
|
||||
if (const VirtualObject* vobject = current->InitVirtualObject(size_int)) {
|
||||
// Initialize with dead nodes as a sentinel for uninitialized memory.
|
||||
for (Variable field : *vobject) {
|
||||
current->Set(field, jsgraph->Dead());
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case IrOpcode::kFinishRegion:
|
||||
current->SetVirtualObject(current->ValueInput(0));
|
||||
break;
|
||||
case IrOpcode::kStoreField: {
|
||||
Node* object = current->ValueInput(0);
|
||||
Node* value = current->ValueInput(1);
|
||||
const VirtualObject* vobject = current->GetVirtualObject(object);
|
||||
Variable var;
|
||||
if (vobject && !vobject->HasEscaped() &&
|
||||
vobject->FieldAt(OffsetOfFieldAccess(op)).To(&var)) {
|
||||
current->Set(var, value);
|
||||
current->MarkForDeletion();
|
||||
} else {
|
||||
current->SetEscaped(object);
|
||||
current->SetEscaped(value);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case IrOpcode::kStoreElement: {
|
||||
Node* object = current->ValueInput(0);
|
||||
Node* index = current->ValueInput(1);
|
||||
Node* value = current->ValueInput(2);
|
||||
const VirtualObject* vobject = current->GetVirtualObject(object);
|
||||
int offset;
|
||||
Variable var;
|
||||
if (vobject && !vobject->HasEscaped() &&
|
||||
               OffsetOfElementsAccess(op, index).To(&offset) &&
               vobject->FieldAt(offset).To(&var)) {
        current->Set(var, value);
        current->MarkForDeletion();
      } else {
        current->SetEscaped(value);
        current->SetEscaped(object);
      }
      break;
    }
    case IrOpcode::kLoadField: {
      Node* object = current->ValueInput(0);
      const VirtualObject* vobject = current->GetVirtualObject(object);
      Variable var;
      if (vobject && !vobject->HasEscaped() &&
          vobject->FieldAt(OffsetOfFieldAccess(op)).To(&var)) {
        current->SetReplacement(current->Get(var));
      } else {
        // TODO(tebbi): At the moment, we mark objects as escaping if there
        // is a load from an invalid location to avoid dead nodes. This is a
        // workaround that should be removed once we can handle dead nodes
        // everywhere.
        current->SetEscaped(object);
      }
      break;
    }
    case IrOpcode::kLoadElement: {
      Node* object = current->ValueInput(0);
      Node* index = current->ValueInput(1);
      const VirtualObject* vobject = current->GetVirtualObject(object);
      int offset;
      Variable var;
      if (vobject && !vobject->HasEscaped() &&
          OffsetOfElementsAccess(op, index).To(&offset) &&
          vobject->FieldAt(offset).To(&var)) {
        current->SetReplacement(current->Get(var));
      } else {
        current->SetEscaped(object);
      }
      break;
    }
    case IrOpcode::kTypeGuard: {
      // The type-guard is re-introduced in the final reducer if the types
      // don't match.
      current->SetReplacement(current->ValueInput(0));
      break;
    }
    case IrOpcode::kReferenceEqual: {
      Node* left = current->ValueInput(0);
      Node* right = current->ValueInput(1);
      const VirtualObject* left_object = current->GetVirtualObject(left);
      const VirtualObject* right_object = current->GetVirtualObject(right);
      if (left_object && !left_object->HasEscaped()) {
        if (right_object && !right_object->HasEscaped() &&
            left_object->id() == right_object->id()) {
          current->SetReplacement(jsgraph->TrueConstant());
        } else {
          current->SetReplacement(jsgraph->FalseConstant());
        }
      } else if (right_object && !right_object->HasEscaped()) {
        current->SetReplacement(jsgraph->FalseConstant());
      }
      break;
    }
    case IrOpcode::kCheckMaps: {
      CheckMapsParameters params = CheckMapsParametersOf(op);
      Node* checked = current->ValueInput(0);
      const VirtualObject* vobject = current->GetVirtualObject(checked);
      Variable map_field;
      if (vobject && !vobject->HasEscaped() &&
          vobject->FieldAt(HeapObject::kMapOffset).To(&map_field)) {
        Node* map = current->Get(map_field);
        if (map) {
          Type* const map_type = NodeProperties::GetType(map);
          if (map_type->IsHeapConstant() &&
              params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
                  map_type->AsHeapConstant()->Value())))) {
            current->MarkForDeletion();
            break;
          }
        }
      }
      current->SetEscaped(checked);
      break;
    }
    case IrOpcode::kCheckHeapObject: {
      Node* checked = current->ValueInput(0);
      switch (checked->opcode()) {
        case IrOpcode::kAllocate:
        case IrOpcode::kFinishRegion:
        case IrOpcode::kHeapConstant:
          current->SetReplacement(checked);
          break;
        default:
          current->SetEscaped(checked);
          break;
      }
      break;
    }
    case IrOpcode::kStateValues:
    case IrOpcode::kFrameState:
      // These uses are always safe.
      break;
    default: {
      // For unknown nodes, treat all value inputs as escaping.
      int value_input_count = op->ValueInputCount();
      for (int i = 0; i < value_input_count; ++i) {
        Node* input = current->ValueInput(i);
        current->SetEscaped(input);
      }
      if (OperatorProperties::HasContextInput(op)) {
        current->SetEscaped(current->ContextInput());
      }
      break;
    }
  }
}

}  // namespace

void NewEscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
  const Operator* op = node->op();
  TRACE("Reducing %s#%d\n", op->mnemonic(), node->id());

  EscapeAnalysisTracker::Scope current(this, tracker_, node, reduction);
  ReduceNode(op, &current, jsgraph());
}

NewEscapeAnalysis::NewEscapeAnalysis(JSGraph* jsgraph, Zone* zone)
    : EffectGraphReducer(
          jsgraph->graph(),
          [this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
          zone),
      tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
      jsgraph_(jsgraph) {}

Node* EscapeAnalysisResult::GetReplacementOf(Node* node) {
  return tracker_->GetReplacementOf(node);
}

Node* EscapeAnalysisResult::GetVirtualObjectField(const VirtualObject* vobject,
                                                  int field, Node* effect) {
  return tracker_->variable_states_.Get(vobject->FieldAt(field).FromJust(),
                                        effect);
}

const VirtualObject* EscapeAnalysisResult::GetVirtualObject(Node* node) {
  return tracker_->virtual_objects_[node];
}

VirtualObject::VirtualObject(VariableTracker* var_states, VirtualObject::Id id,
                             int size)
    : Dependable(var_states->zone()), id_(id), fields_(var_states->zone()) {
  DCHECK(size % kPointerSize == 0);
  TRACE("Creating VirtualObject id:%d size:%d\n", id, size);
  int num_fields = size / kPointerSize;
  fields_.reserve(num_fields);
  for (int i = 0; i < num_fields; ++i) {
    fields_.push_back(var_states->NewVariable());
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
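// Illustrative sketch (not part of this change): once reduction has run to a
// fixed point, a client can query the result roughly as follows. {jsgraph},
// {zone}, {node} and {effect} are assumed to be supplied by the caller.
//
//   NewEscapeAnalysis analysis(jsgraph, zone);
//   analysis.ReduceGraph();
//   EscapeAnalysisResult result = analysis.analysis_result();
//   if (const VirtualObject* vobject = result.GetVirtualObject(node)) {
//     if (!vobject->HasEscaped()) {
//       Node* field0 = result.GetVirtualObjectField(vobject, 0, effect);
//     }
//   }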
214
src/compiler/new-escape-analysis.h
Normal file
@ -0,0 +1,214 @
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_NEW_ESCAPE_ANALYSIS_H_
#define V8_COMPILER_NEW_ESCAPE_ANALYSIS_H_

#include "src/base/functional.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/persistent-map.h"
#include "src/globals.h"

#ifdef DEBUG
#define TRACE(...)                                    \
  do {                                                \
    if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
  } while (false)
#else
#define TRACE(...)
#endif

namespace v8 {
namespace internal {
namespace compiler {

class CommonOperatorBuilder;
class VariableTracker;
class EscapeAnalysisTracker;

#ifdef DEBUG
class TraceScope {
 public:
  TraceScope(const char* name, Node* node) : name_(name), node_(node) {
    for (int i = 0; i < depth; ++i) TRACE("  ");
    TRACE("[ %s %s#%d\n", name, node->op()->mnemonic(), node->id());
    ++depth;
  }
  ~TraceScope() {
    --depth;
    for (int i = 0; i < depth; ++i) TRACE("  ");
    TRACE("] %s %s#%d\n", name_, node_->op()->mnemonic(), node_->id());
  }

 private:
  const char* name_;
  Node* node_;
  static thread_local int depth;
};
#define TRACE_FN(name, node) TraceScope __trace_scope_(name, node)
#else
#define TRACE_FN(name, node)
#endif

// {EffectGraphReducer} reduces up to a fixed point. It distinguishes changes
// to the effect output of a node from changes to the value output to reduce
// the number of revisitations.
class EffectGraphReducer {
 public:
  class Reduction {
   public:
    bool value_changed() const { return value_changed_; }
    void set_value_changed() { value_changed_ = true; }
    bool effect_changed() const { return effect_changed_; }
    void set_effect_changed() { effect_changed_ = true; }

   private:
    bool value_changed_ = false;
    bool effect_changed_ = false;
  };

  EffectGraphReducer(Graph* graph,
                     std::function<void(Node*, Reduction*)> reduce, Zone* zone);

  void ReduceGraph() { ReduceFrom(graph_->end()); }

  // Mark node for revisitation.
  void Revisit(Node* node);

  // Add a new root node to start reduction from. This is useful if the
  // reducer adds nodes that are not yet reachable, but should already be
  // considered part of the graph.
  void AddRoot(Node* node) {
    DCHECK(state_.Get(node) == State::kUnvisited);
    state_.Set(node, State::kRevisit);
    revisit_.push(node);
  }

  bool Complete() { return stack_.empty() && revisit_.empty(); }

 private:
  struct NodeState {
    Node* node;
    int input_index;
  };
  void ReduceFrom(Node* node);
  enum class State : uint8_t { kUnvisited = 0, kRevisit, kOnStack, kVisited };
  const uint8_t kNumStates = static_cast<uint8_t>(State::kVisited) + 1;
  Graph* graph_;
  NodeMarker<State> state_;
  ZoneStack<Node*> revisit_;
  ZoneStack<NodeState> stack_;
  std::function<void(Node*, Reduction*)> reduce_;
};
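
// Illustrative sketch (not part of this change): how a client might drive
// {EffectGraphReducer}. The callback reports whether a node's value or effect
// output changed, so that only the affected uses are revisited.
// {TryImproveValue} and {TryImproveEffect} are hypothetical helpers used only
// for illustration.
//
//   EffectGraphReducer reducer(
//       graph,
//       [&](Node* node, EffectGraphReducer::Reduction* reduction) {
//         if (TryImproveValue(node)) reduction->set_value_changed();
//         if (TryImproveEffect(node)) reduction->set_effect_changed();
//       },
//       zone);
//   reducer.ReduceGraph();
//   DCHECK(reducer.Complete());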

// A variable is an abstract storage location, which is lowered to SSA values
// and phi nodes by {VariableTracker}.
class Variable {
 public:
  Variable() : id_(kInvalid) {}
  bool operator==(Variable other) const { return id_ == other.id_; }
  bool operator!=(Variable other) const { return id_ != other.id_; }
  bool operator<(Variable other) const { return id_ < other.id_; }
  static Variable Invalid() { return Variable(kInvalid); }
  friend V8_INLINE size_t hash_value(Variable v) {
    return base::hash_value(v.id_);
  }
  friend std::ostream& operator<<(std::ostream& os, Variable var) {
    return os << var.id_;
  }

 private:
  typedef int Id;
  explicit Variable(Id id) : id_(id) {}
  Id id_;
  static const Id kInvalid = -1;

  friend class VariableTracker;
};

// An object that can track the nodes in the graph whose current reduction
// depends on the value of the object.
class Dependable : public ZoneObject {
 public:
  explicit Dependable(Zone* zone) : dependants_(zone) {}
  void AddDependency(Node* node) { dependants_.push_back(node); }
  void RevisitDependants(EffectGraphReducer* reducer) {
    for (Node* node : dependants_) {
      reducer->Revisit(node);
    }
    dependants_.clear();
  }

 private:
  ZoneVector<Node*> dependants_;
};

// A virtual object represents an allocation site and tracks the Variables
// associated with its fields as well as its global escape status.
class VirtualObject : public Dependable {
 public:
  typedef uint32_t Id;
  typedef ZoneVector<Variable>::const_iterator const_iterator;
  VirtualObject(VariableTracker* var_states, Id id, int size);
  Maybe<Variable> FieldAt(int offset) const {
    DCHECK(offset % kPointerSize == 0);
    CHECK(!HasEscaped());
    if (offset >= size()) {
      // This can only happen in unreachable code.
      return Nothing<Variable>();
    }
    return Just(fields_.at(offset / kPointerSize));
  }
  Id id() const { return id_; }
  int size() const { return static_cast<int>(kPointerSize * fields_.size()); }
  // Escaped might mean that the object escaped to untracked memory or that it
  // is used in an operation that requires materialization.
  void SetEscaped() { escaped_ = true; }
  bool HasEscaped() const { return escaped_; }
  const_iterator begin() const { return fields_.begin(); }
  const_iterator end() const { return fields_.end(); }

 private:
  bool escaped_ = false;
  Id id_;
  ZoneVector<Variable> fields_;
};
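
// Illustrative sketch (not part of this change): a {VirtualObject} tracks one
// {Variable} per kPointerSize slot, so a field access at offset kPointerSize
// names the second slot. {vobject} is assumed to be a live, non-escaped
// virtual object.
//
//   Variable field;
//   if (vobject->FieldAt(kPointerSize).To(&field)) {
//     // {field} is the abstract storage location of field #1; its current
//     // SSA value is looked up in the variable state at an effect point.
//   }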

class EscapeAnalysisResult {
 public:
  explicit EscapeAnalysisResult(EscapeAnalysisTracker* tracker)
      : tracker_(tracker) {}

  const VirtualObject* GetVirtualObject(Node* node);
  Node* GetVirtualObjectField(const VirtualObject* vobject, int field,
                              Node* effect);
  Node* GetReplacementOf(Node* node);

 private:
  EscapeAnalysisTracker* tracker_;
};

class V8_EXPORT_PRIVATE NewEscapeAnalysis final
    : public NON_EXPORTED_BASE(EffectGraphReducer) {
 public:
  NewEscapeAnalysis(JSGraph* jsgraph, Zone* zone);

  EscapeAnalysisResult analysis_result() {
    DCHECK(Complete());
    return EscapeAnalysisResult(tracker_);
  }

 private:
  void Reduce(Node* node, Reduction* reduction);
  JSGraph* jsgraph() { return jsgraph_; }
  EscapeAnalysisTracker* tracker_;
  JSGraph* jsgraph_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_NEW_ESCAPE_ANALYSIS_H_
@ -482,6 +482,38 @@ bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
  return first <= index && index < first + num;
}

// static
size_t NodeProperties::HashCode(Node* node) {
  size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
  for (Node* input : node->inputs()) {
    h = base::hash_combine(h, input->id());
  }
  return h;
}

// static
bool NodeProperties::Equals(Node* a, Node* b) {
  DCHECK_NOT_NULL(a);
  DCHECK_NOT_NULL(b);
  DCHECK_NOT_NULL(a->op());
  DCHECK_NOT_NULL(b->op());
  if (!a->op()->Equals(b->op())) return false;
  if (a->InputCount() != b->InputCount()) return false;
  Node::Inputs aInputs = a->inputs();
  Node::Inputs bInputs = b->inputs();

  auto aIt = aInputs.begin();
  auto bIt = bInputs.begin();
  auto aEnd = aInputs.end();

  for (; aIt != aEnd; ++aIt, ++bIt) {
    DCHECK_NOT_NULL(*aIt);
    DCHECK_NOT_NULL(*bIt);
    if ((*aIt)->id() != (*bIt)->id()) return false;
  }
  return true;
}
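
// Illustrative sketch (not part of this change): {HashCode} and {Equals}
// define structural equality over (operator, input identities), as needed for
// hash-consing. A value-numbering table can probe with the cheap hash first
// and confirm with the equality check:
//
//   bool MayShareEntry(Node* candidate, Node* entry) {
//     return NodeProperties::HashCode(candidate) ==
//                NodeProperties::HashCode(entry) &&
//            NodeProperties::Equals(candidate, entry);
//   }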

}  // namespace compiler
}  // namespace internal
}  // namespace v8
@ -132,6 +132,12 @@ class V8_EXPORT_PRIVATE NodeProperties final {
  // Checks if two nodes are the same, looking past {CheckHeapObject}.
  static bool IsSame(Node* a, Node* b);

  // Check if two nodes have equal operators and reference-equal inputs. Used
  // for value numbering/hash-consing.
  static bool Equals(Node* a, Node* b);
  // A corresponding hash function.
  static size_t HashCode(Node* node);

  // Walks up the {effect} chain to find a witness that provides map
  // information about the {receiver}. Can look through potentially
  // side effecting nodes.
@ -62,6 +62,7 @
  V(ArgumentsElementsState) \
  V(ArgumentsLengthState)   \
  V(ObjectState)            \
  V(ObjectId)               \
  V(TypedObjectState)       \
  V(Call)                   \
  V(Parameter)              \
514
src/compiler/persistent-map.h
Normal file
@ -0,0 +1,514 @
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_PERSISTENT_MAP_H_
#define V8_COMPILER_PERSISTENT_MAP_H_

#include <array>
#include <bitset>
#include <tuple>

#include "src/base/functional.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// PersistentMap is a persistent map datastructure based on hash trees (a
// binary tree using the bits of a hash value as addresses). The map is
// conceptually infinite: all keys are initially mapped to a default value, and
// values are deleted by overwriting them with the default value. The iterators
// produce exactly the keys whose value is not the default value. The hash
// values should have high variance in their high bits, so dense integers are a
// bad choice.
// Complexity:
// - Copy and assignment: O(1)
// - access: O(log n)
// - update: O(log n) time and space
// - iteration: amortized O(1) per step
// - Zip: O(n)
// - equality check: O(n)
// TODO(tebbi): Cache map transitions to avoid re-allocation of the same map.
// TODO(tebbi): Implement an O(1) equality check based on hash consing or
//              something similar.
template <class Key, class Value, class Hasher = base::hash<Key>>
class PersistentMap {
 public:
  using key_type = Key;
  using mapped_type = Value;
  using value_type = std::pair<Key, Value>;

 private:
  static constexpr size_t kHashBits = 32;
  enum Bit : int { kLeft = 0, kRight = 1 };

  // Access hash bits starting from the high bits and compare them according
  // to their unsigned value. This way, the order in the hash tree is
  // compatible with numeric hash comparisons.
  class HashValue;

  struct KeyValue : std::pair<Key, Value> {
    const Key& key() const { return this->first; }
    const Value& value() const { return this->second; }
    using std::pair<Key, Value>::pair;
  };

  struct FocusedTree;

 public:
  // Depth of the last added element. This is a cheap estimate for the size of
  // the hash tree.
  size_t last_depth() const {
    if (tree_) {
      return tree_->length;
    } else {
      return 0;
    }
  }

  const Value& Get(const Key& key) const {
    HashValue key_hash = HashValue(Hasher()(key));
    const FocusedTree* tree = FindHash(key_hash);
    return GetFocusedValue(tree, key);
  }

  // Add or overwrite an existing key-value pair.
  PersistentMap Add(Key key, Value value) const;
  void Set(Key key, Value value) { *this = Add(key, value); }

  bool operator==(const PersistentMap& other) const {
    if (tree_ == other.tree_) return true;
    if (def_value_ != other.def_value_) return false;
    for (const std::tuple<Key, Value, Value>& triple : Zip(other)) {
      if (std::get<1>(triple) != std::get<2>(triple)) return false;
    }
    return true;
  }

  bool operator!=(const PersistentMap& other) const {
    return !(*this == other);
  }

  // The iterator produces key-value pairs in the lexicographical order of
  // hash value and key. It produces exactly the key-value pairs where the
  // value is not the default value.
  class iterator;

  iterator begin() const {
    if (!tree_) return end();
    return iterator::begin(tree_, def_value_);
  }
  iterator end() const { return iterator::end(def_value_); }

  // Iterator to traverse two maps in lockstep, producing matching value pairs
  // for each key where at least one value is different from the respective
  // default.
  class double_iterator;

  // An iterable to iterate over the two maps in lockstep.
  struct ZipIterable {
    PersistentMap a;
    PersistentMap b;
    double_iterator begin() { return double_iterator(a.begin(), b.begin()); }
    double_iterator end() { return double_iterator(a.end(), b.end()); }
  };

  ZipIterable Zip(const PersistentMap& other) const { return {*this, other}; }

  explicit PersistentMap(Zone* zone, Value def_value = Value())
      : PersistentMap(nullptr, zone, def_value) {}

 private:
  // Find the {FocusedTree} that contains a key-value pair with key hash
  // {hash}.
  const FocusedTree* FindHash(HashValue hash) const;

  // Find the {FocusedTree} that contains a key-value pair with key hash
  // {hash}. Output the path to this {FocusedTree} and its length. If no such
  // {FocusedTree} exists, return {nullptr} and output the path to the last
  // node with a matching hash prefix. Note that {length} is the length of the
  // found path and may be less than the length of the found {FocusedTree}.
  const FocusedTree* FindHash(HashValue hash,
                              std::array<const FocusedTree*, kHashBits>* path,
                              int* length) const;

  // Load value from the leaf node on the focused path of {tree}.
  const Value& GetFocusedValue(const FocusedTree* tree, const Key& key) const;

  // Return the {FocusedTree} representing the left (bit==kLeft) or right
  // (bit==kRight) child of the node on the path of {tree} at tree level
  // {level}.
  static const FocusedTree* GetChild(const FocusedTree* tree, int level,
                                     Bit bit);

  // Find the leftmost path in the tree, starting at the node at tree level
  // {level} on the path of {start}. Output the level of the leaf to {level}
  // and the path to {path}.
  static const FocusedTree* FindLeftmost(
      const FocusedTree* start, int* level,
      std::array<const FocusedTree*, kHashBits>* path);

  PersistentMap(const FocusedTree* tree, Zone* zone, Value def_value)
      : tree_(tree), def_value_(def_value), zone_(zone) {}

  const FocusedTree* tree_;
  Value def_value_;
  Zone* zone_;
};
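
// Illustrative usage sketch (not part of this change): copies are O(1) and
// never observe later updates, because {Set} builds a new tree instead of
// mutating in place. {zone} is assumed to be a live Zone*.
//
//   PersistentMap<int, int> a(zone);  // every key initially maps to 0
//   a.Set(1, 10);
//   PersistentMap<int, int> b = a;    // O(1) copy
//   a.Set(1, 20);
//   CHECK_EQ(20, a.Get(1));
//   CHECK_EQ(10, b.Get(1));           // the copy still sees the old value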

// This structure represents a hash tree with one focused path to a specific
// leaf. For the focused leaf, it stores key, value and key hash. The path is
// defined by the hash bits of the focused leaf. In a traditional tree
// datastructure, the nodes of a path form a linked list with the values being
// the pointers outside of this path. Instead of storing all of these nodes,
// we store an array of the pointers pointing outside of the path. This is
// similar to the stack used when doing DFS traversal of a tree. The hash of
// the leaf is used to know if the pointers point to the left or the right of
// the path. As there is no explicit representation of a tree node, this
// structure also represents all the nodes on its path. The intended node
// depends on the tree depth, which is always clear from the referencing
// context. So the pointer to a {FocusedTree} stored in the
// {PersistentMap.tree_} always references the root, while a pointer from a
// focused node of another {FocusedTree} always references one tree level
// lower than before.
template <class Key, class Value, class Hasher>
struct PersistentMap<Key, Value, Hasher>::FocusedTree {
  KeyValue key_value;
  // The depth of the focused path, that is, the number of pointers stored in
  // this structure.
  int8_t length;
  HashValue key_hash;
  // Out-of-line storage for hash collisions.
  const ZoneMap<Key, Value>* more;
  using more_iterator = typename ZoneMap<Key, Value>::const_iterator;
  // {path_array} has to be the last member: To store an array inline, we
  // over-allocate memory for this structure and access memory beyond
  // {path_array}. This corresponds to a flexible array member as defined in
  // C99.
  const FocusedTree* path_array[1];
  const FocusedTree*& path(int i) {
    DCHECK(i < length);
    return reinterpret_cast<const FocusedTree**>(
        reinterpret_cast<byte*>(this) + offsetof(FocusedTree, path_array))[i];
  }
  const FocusedTree* path(int i) const {
    DCHECK(i < length);
    return reinterpret_cast<const FocusedTree* const*>(
        reinterpret_cast<const byte*>(this) +
        offsetof(FocusedTree, path_array))[i];
  }
};
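
// Illustrative sketch (not part of this change): because of the flexible
// array member, a {FocusedTree} with a path of length 3 is allocated with
// room for two extra pointers beyond {path_array}, mirroring the allocation
// in {PersistentMap::Add} below:
//
//   void* memory = zone->New(sizeof(FocusedTree) +
//                            (3 - 1) * sizeof(const FocusedTree*));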

template <class Key, class Value, class Hasher>
class PersistentMap<Key, Value, Hasher>::HashValue {
 public:
  explicit HashValue(size_t hash) : bits_(hash) {}
  explicit HashValue(std::bitset<kHashBits> hash) : bits_(hash) {}

  Bit operator[](int pos) const {
    return bits_[kHashBits - pos - 1] ? kRight : kLeft;
  }

  bool operator<(HashValue other) const {
    static_assert(sizeof(*this) <= sizeof(unsigned long), "");  // NOLINT
    return bits_.to_ulong() < other.bits_.to_ulong();
  }
  bool operator==(HashValue other) const { return bits_ == other.bits_; }
  bool operator!=(HashValue other) const { return bits_ != other.bits_; }
  HashValue operator^(HashValue other) const {
    return HashValue(bits_ ^ other.bits_);
  }

 private:
  std::bitset<kHashBits> bits_;
};
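
// Illustrative sketch (not part of this change): bits are consumed from the
// most significant end, so for the 32-bit hash 0x80000000 the first tree step
// goes right and all subsequent steps go left:
//
//   HashValue h(size_t{0x80000000});
//   DCHECK_EQ(kRight, h[0]);
//   DCHECK_EQ(kLeft, h[1]);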

template <class Key, class Value, class Hasher>
class PersistentMap<Key, Value, Hasher>::iterator {
 public:
  const value_type operator*() const {
    if (current_->more) {
      return *more_iter_;
    } else {
      return current_->key_value;
    }
  }

  iterator& operator++() {
    do {
      if (!current_) {
        // Iterator is past the end.
        return *this;
      }
      if (current_->more) {
        DCHECK(more_iter_ != current_->more->end());
        ++more_iter_;
        if (more_iter_ != current_->more->end()) return *this;
      }
      if (level_ == 0) {
        *this = end(def_value_);
        return *this;
      }
      --level_;
      while (current_->key_hash[level_] == kRight ||
             path_[level_] == nullptr) {
        if (level_ == 0) {
          *this = end(def_value_);
          return *this;
        }
        --level_;
      }
      const FocusedTree* first_right_alternative = path_[level_];
      level_++;
      current_ = FindLeftmost(first_right_alternative, &level_, &path_);
      if (current_->more) {
        more_iter_ = current_->more->begin();
      }
    } while ((**this).second == def_value());
    return *this;
  }

  bool operator==(const iterator& other) const {
    if (is_end()) return other.is_end();
    if (other.is_end()) return false;
    if (current_->key_hash != other.current_->key_hash) {
      return false;
    } else {
      return (**this).first == (*other).first;
    }
  }
  bool operator!=(const iterator& other) const { return !(*this == other); }

  bool operator<(const iterator& other) const {
    if (is_end()) return false;
    if (other.is_end()) return true;
    if (current_->key_hash < other.current_->key_hash) {
      return true;
    } else if (current_->key_hash == other.current_->key_hash) {
      return (**this).first < (*other).first;
    } else {
      return false;
    }
  }

  bool is_end() const { return current_ == nullptr; }

  const Value& def_value() { return def_value_; }

  static iterator begin(const FocusedTree* tree, Value def_value) {
    iterator i(def_value);
    i.current_ = FindLeftmost(tree, &i.level_, &i.path_);
    if (i.current_->more) {
      i.more_iter_ = i.current_->more->begin();
    }
    return i;
  }

  static iterator end(Value def_value) { return iterator(def_value); }

 private:
  int level_;
  typename FocusedTree::more_iterator more_iter_;
  const FocusedTree* current_;
  std::array<const FocusedTree*, kHashBits> path_;
  Value def_value_;

  explicit iterator(Value def_value)
      : level_(0), current_(nullptr), def_value_(def_value) {}
};

template <class Key, class Value, class Hasher>
class PersistentMap<Key, Value, Hasher>::double_iterator {
 public:
  std::tuple<Key, Value, Value> operator*() {
    if (first_current_) {
      auto pair = *first_;
      return std::make_tuple(
          pair.first, pair.second,
          second_current_ ? (*second_).second : second_.def_value());
    } else {
      DCHECK(second_current_);
      auto pair = *second_;
      return std::make_tuple(pair.first, first_.def_value(), pair.second);
    }
  }

  double_iterator& operator++() {
    if (first_current_) ++first_;
    if (second_current_) ++second_;
    return *this = double_iterator(first_, second_);
  }

  double_iterator(iterator first, iterator second)
      : first_(first), second_(second) {
    if (first_ == second_) {
      first_current_ = second_current_ = true;
    } else if (first_ < second_) {
      first_current_ = true;
      second_current_ = false;
    } else {
      first_current_ = false;
      second_current_ = true;
    }
  }

  bool operator!=(const double_iterator& other) {
    return first_ != other.first_ || second_ != other.second_;
  }

  bool is_end() const { return first_.is_end() && second_.is_end(); }

 private:
  iterator first_;
  iterator second_;
  bool first_current_;
  bool second_current_;
};

template <class Key, class Value, class Hasher>
PersistentMap<Key, Value, Hasher> PersistentMap<Key, Value, Hasher>::Add(
    Key key, Value value) const {
  HashValue key_hash = HashValue(Hasher()(key));
  std::array<const FocusedTree*, kHashBits> path;
  int length = 0;
  const FocusedTree* old = FindHash(key_hash, &path, &length);
  ZoneMap<Key, Value>* more = nullptr;
  if (GetFocusedValue(old, key) == value) return *this;
  if (old && !(old->more == nullptr && old->key_value.key() == key)) {
    more = new (zone_->New(sizeof(*more))) ZoneMap<Key, Value>(zone_);
    if (old->more) {
      *more = *old->more;
    } else {
      (*more)[old->key_value.key()] = old->key_value.value();
    }
    (*more)[key] = value;
  }
  FocusedTree* tree =
      new (zone_->New(sizeof(FocusedTree) +
                      std::max(0, length - 1) * sizeof(const FocusedTree*)))
          FocusedTree{KeyValue(std::move(key), std::move(value)),
                      static_cast<int8_t>(length),
                      key_hash,
                      more,
                      {}};
  for (int i = 0; i < length; ++i) {
    tree->path(i) = path[i];
  }
  return PersistentMap(tree, zone_, def_value_);
}

template <class Key, class Value, class Hasher>
const typename PersistentMap<Key, Value, Hasher>::FocusedTree*
PersistentMap<Key, Value, Hasher>::FindHash(HashValue hash) const {
  const FocusedTree* tree = tree_;
  int level = 0;
  while (tree && hash != tree->key_hash) {
    while ((hash ^ tree->key_hash)[level] == 0) {
      ++level;
    }
    tree = level < tree->length ? tree->path(level) : nullptr;
    ++level;
  }
  return tree;
}

template <class Key, class Value, class Hasher>
const typename PersistentMap<Key, Value, Hasher>::FocusedTree*
PersistentMap<Key, Value, Hasher>::FindHash(
    HashValue hash, std::array<const FocusedTree*, kHashBits>* path,
    int* length) const {
  const FocusedTree* tree = tree_;
  int level = 0;
  while (tree && hash != tree->key_hash) {
    int map_length = tree->length;
    while ((hash ^ tree->key_hash)[level] == 0) {
      (*path)[level] = level < map_length ? tree->path(level) : nullptr;
      ++level;
    }
    (*path)[level] = tree;
    tree = level < tree->length ? tree->path(level) : nullptr;
    ++level;
  }
  if (tree) {
    while (level < tree->length) {
      (*path)[level] = tree->path(level);
      ++level;
    }
  }
  *length = level;
  return tree;
}

template <class Key, class Value, class Hasher>
const Value& PersistentMap<Key, Value, Hasher>::GetFocusedValue(
    const FocusedTree* tree, const Key& key) const {
  if (!tree) {
    return def_value_;
  }
  if (tree->more) {
    auto it = tree->more->find(key);
    if (it == tree->more->end())
      return def_value_;
    else
      return it->second;
  } else {
    if (key == tree->key_value.key()) {
      return tree->key_value.value();
    } else {
      return def_value_;
    }
  }
}

template <class Key, class Value, class Hasher>
const typename PersistentMap<Key, Value, Hasher>::FocusedTree*
PersistentMap<Key, Value, Hasher>::GetChild(const FocusedTree* tree, int level,
                                            Bit bit) {
  if (tree->key_hash[level] == bit) {
    return tree;
  } else if (level < tree->length) {
    return tree->path(level);
  } else {
    return nullptr;
  }
}

template <class Key, class Value, class Hasher>
const typename PersistentMap<Key, Value, Hasher>::FocusedTree*
PersistentMap<Key, Value, Hasher>::FindLeftmost(
    const FocusedTree* start, int* level,
    std::array<const FocusedTree*, kHashBits>* path) {
  const FocusedTree* current = start;
  while (*level < current->length) {
    if (const FocusedTree* child = GetChild(current, *level, kLeft)) {
      (*path)[*level] = GetChild(current, *level, kRight);
      current = child;
      ++*level;
    } else if (const FocusedTree* child = GetChild(current, *level, kRight)) {
      (*path)[*level] = GetChild(current, *level, kLeft);
      current = child;
      ++*level;
    } else {
      UNREACHABLE();
    }
  }
  return current;
}

template <class Key, class Value, class Hasher>
std::ostream& operator<<(std::ostream& os,
                         const PersistentMap<Key, Value, Hasher>& map) {
  os << "{";
  bool first = true;
  for (auto pair : map) {
    if (!first) os << ", ";
    first = false;
    os << pair.first << ": " << pair.second;
  }
  return os << "}";
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_PERSISTENT_MAP_H_
@ -51,6 +51,8 @
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
#include "src/compiler/new-escape-analysis-reducer.h"
#include "src/compiler/new-escape-analysis.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
@ -1157,19 +1159,32 @@ struct EscapeAnalysisPhase {
  static const char* phase_name() { return "escape analysis"; }

  void Run(PipelineData* data, Zone* temp_zone) {
    EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
                                   temp_zone);
    if (!escape_analysis.Run()) return;
    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
    EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                         &escape_analysis, temp_zone);
    AddReducer(data, &graph_reducer, &escape_reducer);
    graph_reducer.ReduceGraph();
    if (escape_reducer.compilation_failed()) {
      data->set_compilation_failed();
      return;
    if (FLAG_turbo_new_escape) {
      NewEscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
      escape_analysis.ReduceGraph();
      JSGraphReducer reducer(data->jsgraph(), temp_zone);
      NewEscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
                                              escape_analysis.analysis_result(),
                                              temp_zone);
      AddReducer(data, &reducer, &escape_reducer);
      reducer.ReduceGraph();
      // TODO(tebbi): Turn this into a debug mode check once we have
      // confidence.
      escape_reducer.VerifyReplacement();
    } else {
      EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
                                     temp_zone);
      if (!escape_analysis.Run()) return;
      JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
      EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                           &escape_analysis, temp_zone);
      AddReducer(data, &graph_reducer, &escape_reducer);
      graph_reducer.ReduceGraph();
      if (escape_reducer.compilation_failed()) {
        data->set_compilation_failed();
        return;
      }
      escape_reducer.VerifyReplacement();
    }
    escape_reducer.VerifyReplacement();
  }
};

@ -142,12 +142,10 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
  UNREACHABLE();
}

UseInfo UseInfoForBasePointer(const FieldAccess& access) {
  return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
}

UseInfo UseInfoForBasePointer(const ElementAccess& access) {
  return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
}
@ -1014,8 +1012,9 @@ class RepresentationSelector {
        // The target of the call.
        ProcessInput(node, i, UseInfo::Any());
      } else if ((i - 1) < params) {
        ProcessInput(node, i, TruncatingUseInfoFromRepresentation(
                                  desc->GetInputType(i).representation()));
        ProcessInput(node, i,
                     TruncatingUseInfoFromRepresentation(
                         desc->GetInputType(i).representation()));
      } else {
        ProcessInput(node, i, UseInfo::AnyTagged());
      }
@ -1158,8 +1157,8 @@ class RepresentationSelector {
      (*types)[i] =
          DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
    }
    NodeProperties::ChangeOp(node,
                             jsgraph_->common()->TypedObjectState(types));
    NodeProperties::ChangeOp(node, jsgraph_->common()->TypedObjectState(
                                       ObjectIdOf(node->op()), types));
  }
  SetOutput(node, MachineRepresentation::kTagged);
}
@ -2852,6 +2851,8 @@ class RepresentationSelector {
        return VisitStateValues(node);
      case IrOpcode::kObjectState:
        return VisitObjectState(node);
      case IrOpcode::kObjectId:
        return SetOutput(node, MachineRepresentation::kTaggedPointer);
      case IrOpcode::kTypeGuard: {
        // We just get rid of the sigma here, choosing the best representation
        // for the sigma's type.
@ -3295,7 +3296,6 @@ void SimplifiedLowering::DoLoadBuffer(Node* node,
  }
}

void SimplifiedLowering::DoStoreBuffer(Node* node) {
  DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
  MachineRepresentation const rep =
@ -3423,7 +3423,6 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
  return graph()->NewNode(phi_op, true0, false0, merge0);
}

Node* SimplifiedLowering::Int32Mod(Node* const node) {
  Int32BinopMatcher m(node);
  Node* const zero = jsgraph()->Int32Constant(0);
@ -3556,7 +3555,6 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
  return d.Phi(MachineRepresentation::kWord32, zero, div);
}

Node* SimplifiedLowering::Uint32Mod(Node* const node) {
  Uint32BinopMatcher m(node);
  Node* const minus_one = jsgraph()->Int32Constant(-1);
@ -832,6 +832,8 @@ Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
  return Type::Internal();
}

Type* Typer::Visitor::TypeObjectId(Node* node) { UNREACHABLE(); }

Type* Typer::Visitor::TypeArgumentsElementsState(Node* node) {
  return Type::Internal();
}
@ -14,41 +14,6 @@ namespace v8 {
namespace internal {
namespace compiler {

namespace {

size_t HashCode(Node* node) {
  size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
  for (Node* input : node->inputs()) {
    h = base::hash_combine(h, input->id());
  }
  return h;
}

bool Equals(Node* a, Node* b) {
  DCHECK_NOT_NULL(a);
  DCHECK_NOT_NULL(b);
  DCHECK_NOT_NULL(a->op());
  DCHECK_NOT_NULL(b->op());
  if (!a->op()->Equals(b->op())) return false;
  if (a->InputCount() != b->InputCount()) return false;
  Node::Inputs aInputs = a->inputs();
  Node::Inputs bInputs = b->inputs();

  auto aIt = aInputs.begin();
  auto bIt = bInputs.begin();
  auto aEnd = aInputs.end();

  for (; aIt != aEnd; ++aIt, ++bIt) {
    DCHECK_NOT_NULL(*aIt);
    DCHECK_NOT_NULL(*bIt);
    if ((*aIt)->id() != (*bIt)->id()) return false;
  }
  return true;
}

}  // namespace

ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
    : entries_(nullptr),
      capacity_(0),
@ -62,7 +27,7 @@ ValueNumberingReducer::~ValueNumberingReducer() {}
Reduction ValueNumberingReducer::Reduce(Node* node) {
  if (!node->op()->HasProperty(Operator::kIdempotent)) return NoChange();

  const size_t hash = HashCode(node);
  const size_t hash = NodeProperties::HashCode(node);
  if (!entries_) {
    DCHECK(size_ == 0);
    DCHECK(capacity_ == 0);
@ -131,7 +96,7 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
      // Otherwise, keep searching for another collision.
      continue;
    }
    if (Equals(entry, node)) {
    if (NodeProperties::Equals(entry, node)) {
      Reduction reduction = ReplaceIfTypesMatch(node, entry);
      if (reduction.Changed()) {
        // Overwrite the colliding entry with the actual entry.
@ -153,7 +118,7 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
      dead = i;
      continue;
    }
    if (Equals(entry, node)) {
    if (NodeProperties::Equals(entry, node)) {
      return ReplaceIfTypesMatch(node, entry);
    }
  }
@ -197,7 +162,8 @@ void ValueNumberingReducer::Grow() {
  for (size_t i = 0; i < old_capacity; ++i) {
    Node* const old_entry = old_entries[i];
    if (!old_entry || old_entry->IsDead()) continue;
    for (size_t j = HashCode(old_entry) & mask;; j = (j + 1) & mask) {
    for (size_t j = NodeProperties::HashCode(old_entry) & mask;;
         j = (j + 1) & mask) {
      Node* const entry = entries_[j];
      if (entry == old_entry) {
        // Skip duplicate of the old entry.
@ -507,6 +507,9 @@ void Verifier::Visitor::Check(Node* node) {
      // still be kStateValues.
      break;
    }
    case IrOpcode::kObjectId:
      CheckTypeIs(node, Type::Object());
      break;
    case IrOpcode::kStateValues:
    case IrOpcode::kTypedStateValues:
    case IrOpcode::kArgumentsElementsState:
@ -465,6 +465,8 @@ DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
DEFINE_BOOL(turbo_new_escape, false,
            "enable new implementation of escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
            "enable instruction scheduling in TurboFan")
DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
@ -1430,7 +1430,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
    } else {
      // The receiver is not in a stack slot nor in a literal. We give up.
      it.Skip(Translation::NumberOfOperandsFor(opcode));
      // TODO(3029): Materializing a captured object (or duplicated
      // TODO(6586): Materializing a captured object (or duplicated
      // object) is hard, we return undefined for now. This breaks the
      // produced stack trace, as constructor frames aren't marked as
      // such anymore.
@ -811,6 +811,10 @
        'compiler/memory-optimizer.h',
        'compiler/move-optimizer.cc',
        'compiler/move-optimizer.h',
        'compiler/new-escape-analysis.cc',
        'compiler/new-escape-analysis.h',
        'compiler/new-escape-analysis-reducer.cc',
        'compiler/new-escape-analysis-reducer.h',
        'compiler/node-aux-data.h',
        'compiler/node-cache.cc',
        'compiler/node-cache.h',
@ -832,6 +836,7 @
        'compiler/operator.h',
        'compiler/osr.cc',
        'compiler/osr.h',
        'compiler/persistent-map.h',
        'compiler/pipeline.cc',
        'compiler/pipeline.h',
        'compiler/pipeline-statistics.cc',
@ -12,8 +12,11 @
#include <queue>
#include <set>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "src/base/functional.h"
#include "src/zone/zone-allocator.h"

namespace v8 {
@ -133,6 +136,35 @@ class ZoneMap
                Compare(), ZoneAllocator<std::pair<const K, V>>(zone)) {}
};

// A wrapper subclass for std::unordered_map to make it easy to construct one
// that uses a zone allocator.
template <typename K, typename V, typename Hash = base::hash<K>,
          typename KeyEqual = std::equal_to<K>>
class ZoneUnorderedMap
    : public std::unordered_map<K, V, Hash, KeyEqual,
                                ZoneAllocator<std::pair<const K, V>>> {
 public:
  // Constructs an empty map.
  explicit ZoneUnorderedMap(Zone* zone)
      : std::unordered_map<K, V, Hash, KeyEqual,
                           ZoneAllocator<std::pair<const K, V>>>(
            100, Hash(), KeyEqual(),
            ZoneAllocator<std::pair<const K, V>>(zone)) {}
};

// A wrapper subclass for std::unordered_set to make it easy to construct one
// that uses a zone allocator.
template <typename K, typename Hash = base::hash<K>,
          typename KeyEqual = std::equal_to<K>>
class ZoneUnorderedSet
    : public std::unordered_set<K, Hash, KeyEqual, ZoneAllocator<K>> {
 public:
  // Constructs an empty set.
  explicit ZoneUnorderedSet(Zone* zone)
      : std::unordered_set<K, Hash, KeyEqual, ZoneAllocator<K>>(
            100, Hash(), KeyEqual(), ZoneAllocator<K>(zone)) {}
};
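
// Illustrative usage sketch (not part of this change): the wrappers only fix
// the allocator, so the containers behave like their std counterparts while
// drawing all memory from the given zone. {zone} and {node} are assumed to
// exist.
//
//   ZoneUnorderedMap<int, Node*> cache(zone);
//   cache[42] = node;  // allocated in {zone}, freed when the zone dies
//   ZoneUnorderedSet<int> seen(zone);
//   seen.insert(42);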

// A wrapper subclass for std::multimap to make it easy to construct one that
// uses a zone allocator.
template <typename K, typename V, typename Compare = std::less<K>>
22
test/mjsunit/compiler/escape-analysis-cycle.js
Normal file
@ -0,0 +1,22 @
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Flags: --allow-natives-syntax --turbo-escape

function g(o) {
  return {a: o, b: 42, c: o};
}

function f() {
  var o = {a: {}, b: 43};
  o.a = g(g(o));
  o.c = o.a.c;
  %DeoptimizeNow();
  return o.c.a.c.a.c.a.c.b;
}

assertEquals(42, f());
assertEquals(42, f());
%OptimizeFunctionOnNextCall(f);
assertEquals(42, f());
@ -249,14 +249,17 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
})();

(function() {
  var re = /Array\.forEach/;
  var lazyDeopt = function(deopt) {
  // TODO(6586): Once we have fixed the materialization of receivers for
  // stack trace computation, this should be /Array\.forEach/ again.
  var re = /forEach/;
  var lazyDeopt = function foobar(deopt) {
    var b = [1,2,3];
    var result = 0;
    var sum = function(v,i,o) {
      result += v;
      if (i == 1) {
        var e = new Error();
        print(e.stack);
        assertTrue(re.exec(e.stack) !== null);
      }
    };
@ -259,7 +259,7 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
})();

(function() {
  var re = /Array\.map/;
  var re = /map/;
  var lazyDeopt = function(deopt) {
    var b = [1,2,3];
    var result = 0;
@ -85,6 +85,7 @@ v8_executable("unittests") {
    "compiler/node-test-utils.h",
    "compiler/node-unittest.cc",
    "compiler/opcodes-unittest.cc",
    "compiler/persistent-unittest.cc",
    "compiler/regalloc/live-range-unittest.cc",
    "compiler/regalloc/move-optimizer-unittest.cc",
    "compiler/regalloc/register-allocator-unittest.cc",
121
test/unittests/compiler/persistent-unittest.cc
Normal file
@ -0,0 +1,121 @
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <tuple>

#include "src/base/utils/random-number-generator.h"
#include "src/compiler/persistent-map.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"

namespace v8 {
namespace internal {
namespace compiler {

// A random distribution that produces both small values and arbitrary numbers.
static int small_big_distr(base::RandomNumberGenerator* rand) {
  return rand->NextInt() / std::max(1, rand->NextInt() / 100);
}

TEST(PersistentMap, RefTest) {
  base::RandomNumberGenerator rand(92834738);
  AccountingAllocator allocator;
  Zone zone(&allocator, ZONE_NAME);
  std::vector<PersistentMap<int, int>> pers_maps;
  pers_maps.emplace_back(&zone);
  std::vector<std::map<int, int>> ref_maps = {{}};
  for (int i = 0; i < 100000; ++i) {
    if (rand.NextInt(2) == 0) {
      // Read a value.
      int key = small_big_distr(&rand);
      if (ref_maps[0].count(key) > 0) {
        ASSERT_EQ(pers_maps[0].Get(key), ref_maps[0][key]);
      } else {
        ASSERT_EQ(pers_maps[0].Get(key), 0);
      }
    }
    if (rand.NextInt(2) == 0) {
      // Add a value.
      int key = small_big_distr(&rand);
      int value = small_big_distr(&rand);
      pers_maps[0].Set(key, value);
      ref_maps[0][key] = value;
    }
    if (rand.NextInt(1000) == 0) {
      // Create an empty map.
      pers_maps.emplace_back(&zone);
      ref_maps.emplace_back();
    }
    if (rand.NextInt(100) == 0) {
      // Copy and move around maps.
      int num_maps = static_cast<int>(pers_maps.size());
      int source = rand.NextInt(num_maps - 1) + 1;
      int target = rand.NextInt(num_maps - 1) + 1;
      pers_maps[target] = std::move(pers_maps[0]);
      ref_maps[target] = std::move(ref_maps[0]);
      pers_maps[0] = pers_maps[source];
      ref_maps[0] = ref_maps[source];
    }
  }
  for (size_t i = 0; i < pers_maps.size(); ++i) {
    std::set<int> keys;
    for (auto pair : pers_maps[i]) {
      ASSERT_EQ(keys.count(pair.first), 0u);
      keys.insert(pair.first);
      ASSERT_EQ(ref_maps[i][pair.first], pair.second);
    }
    for (auto pair : ref_maps[i]) {
      ASSERT_EQ(pers_maps[i].Get(pair.first), pair.second);
    }
  }
}

TEST(PersistentMap, Zip) {
  base::RandomNumberGenerator rand(92834738);
  AccountingAllocator allocator;
  Zone zone(&allocator, ZONE_NAME);

  // Provoke hash collisions to stress the iterator.
  struct bad_hash {
    size_t operator()(int key) { return static_cast<size_t>(key) % 1000; }
  };
  PersistentMap<int, int, bad_hash> a(&zone);
  PersistentMap<int, int, bad_hash> b(&zone);

  int sum_a = 0;
  int sum_b = 0;

  for (int i = 0; i < 30000; ++i) {
    int key = small_big_distr(&rand);
    int value = small_big_distr(&rand);
    if (rand.NextBool()) {
      sum_a += value;
      a.Set(key, a.Get(key) + value);
    } else {
      sum_b += value;
      b.Set(key, b.Get(key) + value);
    }
  }

  int sum = sum_a + sum_b;

  for (auto pair : a) {
    sum_a -= pair.second;
  }
  ASSERT_EQ(0, sum_a);

  for (auto pair : b) {
    sum_b -= pair.second;
  }
  ASSERT_EQ(0, sum_b);

  for (auto triple : a.Zip(b)) {
    sum -= std::get<1>(triple) + std::get<2>(triple);
  }
  ASSERT_EQ(0, sum);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
@ -80,6 +80,7 @
    'compiler/node-test-utils.h',
    'compiler/node-unittest.cc',
    'compiler/opcodes-unittest.cc',
    'compiler/persistent-unittest.cc',
    'compiler/regalloc/register-allocator-unittest.cc',
    'compiler/schedule-unittest.cc',
    'compiler/scheduler-unittest.cc',