[TurboProp] Split out MemoryLowering from MemoryOptimizer

Separates the memory lowering operations into a separate MemoryLowering
class which is used by the MemoryOptimizer. This will enable TurboProp
to reduce memory operations without having to do a full memory
optimization pass.

BUG=v8:9684

Change-Id: I1b333f1360fd342612672842bf879f44ab1ee60c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1815243
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63966}
Authored by Ross McIlroy on 2019-09-23 17:00:19 +01:00; committed by Commit Bot
parent 63e9a7d9bf
commit 6d5e9b798e
6 changed files with 893 additions and 673 deletions
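As the description notes, this split is meant to let a reduced pipeline (TurboProp) run memory lowering on its own, without the MemoryOptimizer's full effect-chain walk. A hypothetical sketch of such wiring follows; only the MemoryLowering constructor (added in memory-lowering.h below) comes from this CL, while the GraphReducer-based phase setup and all surrounding names are assumptions modelled on other pipeline.cc phases.

// Hypothetical sketch, not part of this CL: run MemoryLowering as a plain
// Reducer. The GraphReducer setup below is assumed, not prescribed here.
GraphReducer graph_reducer(temp_zone, data->graph(),
                           &data->info()->tick_counter(),
                           data->jsgraph()->Dead());
// Defaults from memory-lowering.h: no allocation folding, and UNREACHABLE()
// if a kAssertNoWriteBarrier store would still need a write barrier.
MemoryLowering memory_lowering(data->jsgraph(), temp_zone,
                               data->info()->GetPoisoningMitigationLevel());
graph_reducer.AddReducer(&memory_lowering);
graph_reducer.ReduceGraph();

Compared to the MemoryOptimizer path further down, this skips AllocationState threading, so write barriers are only removed based on the stored value, not on allocation-group tracking.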

BUILD.gn

@@ -1880,6 +1880,8 @@ v8_compiler_sources = [
"src/compiler/machine-operator.h",
"src/compiler/map-inference.cc",
"src/compiler/map-inference.h",
"src/compiler/memory-lowering.cc",
"src/compiler/memory-lowering.h",
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
"src/compiler/node-aux-data.h",

src/compiler/memory-lowering.cc (new file)

@@ -0,0 +1,551 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/memory-lowering.h"
#include "src/codegen/interface-descriptors.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
// An allocation group represents a set of allocations that have been folded
// together.
class MemoryLowering::AllocationGroup final : public ZoneObject {
public:
AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
AllocationGroup(Node* node, AllocationType allocation, Node* size,
Zone* zone);
~AllocationGroup() = default;
void Add(Node* object);
bool Contains(Node* object) const;
bool IsYoungGenerationAllocation() const {
return allocation() == AllocationType::kYoung;
}
AllocationType allocation() const { return allocation_; }
Node* size() const { return size_; }
private:
ZoneSet<NodeId> node_ids_;
AllocationType const allocation_;
Node* const size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
: jsgraph_(jsgraph),
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
allocation_folding_(allocation_folding),
poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
Reduction MemoryLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kAllocate:
// Allocate nodes were purged from the graph in effect-control
// linearization.
UNREACHABLE();
case IrOpcode::kAllocateRaw:
return ReduceAllocateRaw(node);
case IrOpcode::kLoadFromObject:
return ReduceLoadFromObject(node);
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kStoreToObject:
return ReduceStoreToObject(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
case IrOpcode::kStoreField:
return ReduceStoreField(node);
case IrOpcode::kStore:
return ReduceStore(node);
default:
return NoChange();
}
}
#define __ gasm()->
Reduction MemoryLowering::ReduceAllocateRaw(
Node* node, AllocationType allocation_type,
AllowLargeObjects allow_large_objects, AllocationState const** state_ptr) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
state_ptr != nullptr);
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
gasm()->Reset(effect, control);
Node* allocate_builtin;
if (allocation_type == AllocationType::kYoung) {
if (allow_large_objects == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInYoungGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
}
} else {
if (allow_large_objects == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInOldGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
}
}
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
intptr_t const object_size = m.Value();
AllocationState const* state = *state_ptr;
if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
if (machine()->Is64()) {
if (OpParameter<int64_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int64Constant(state_size));
}
} else {
if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(
group->size(),
common()->Int32Constant(static_cast<int32_t>(state_size)));
}
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the effective inner allocated address.
value = __ BitcastWordToTagged(
__ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
// Extend the allocation {group}.
group->Add(value);
*state_ptr =
AllocationState::Open(group, state_size, top, effect, zone());
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineType::PointerRepresentation());
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
__ Bind(&call_runtime);
{
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
__ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
__ Bind(&done);
// Compute the new top and write it back.
top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the initial object address.
value = __ BitcastWordToTagged(
__ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
// Start a new allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, size, zone());
*state_ptr =
AllocationState::Open(group, object_size, top, effect, zone());
}
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
if (allow_large_objects == AllowLargeObjects::kTrue) {
__ GotoIfNot(
__ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
&call_runtime);
}
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
__ Goto(&done, __ BitcastWordToTagged(
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
if (state_ptr) {
// Create an unfoldable allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, zone());
*state_ptr = AllocationState::Closed(group, effect, zone());
}
}
// Replace all effect uses of {node} with the {effect} and replace
// all value uses of {node} with the {value}.
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
} else if (NodeProperties::IsValueEdge(edge)) {
edge.UpdateTo(value);
} else {
DCHECK(NodeProperties::IsControlEdge(edge));
edge.UpdateTo(control);
}
}
// Kill the {node} to make sure we don't leave dangling dead uses.
node->Kill();
return Replace(value);
}
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
return Changed(node);
}
Reduction MemoryLowering::ReduceLoadElement(Node* node) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
return Changed(node);
}
Reduction MemoryLowering::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
node->ReplaceInput(1, ComputeIndex(access, index));
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
return Changed(node);
}
Reduction MemoryLowering::ReduceStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
return Changed(node);
}
Reduction MemoryLowering::ReduceStore(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
representation.representation(), write_barrier_kind)));
return Changed(node);
}
return NoChange();
}
Node* MemoryLowering::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
index = __ WordShl(index, __ IntPtrConstant(element_size_shift));
}
int const fixed_offset = access.header_size - access.tag();
if (fixed_offset) {
index = __ IntAdd(index, __ IntPtrConstant(fixed_offset));
}
return index;
}
#undef __
namespace {
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
while (true) {
switch (value->opcode()) {
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedToCompressedSigned:
return false;
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
case IrOpcode::kChangeTaggedToCompressed:
value = NodeProperties::GetValueInput(value, 0);
continue;
case IrOpcode::kHeapConstant: {
RootIndex root_index;
if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
&root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
return false;
}
break;
}
default:
break;
}
return true;
}
}
} // namespace
Reduction MemoryLowering::ReduceAllocateRaw(Node* node) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
const AllocateParameters& allocation = AllocateParametersOf(node->op());
return ReduceAllocateRaw(node, allocation.allocation_type(),
allocation.allow_large_objects(), nullptr);
}
WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
Node* node, Node* object, Node* value, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state && state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
if (!ValueNeedsWriteBarrier(value, isolate())) {
write_barrier_kind = kNoWriteBarrier;
}
if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
write_barrier_assert_failed_(node, object, function_debug_name_, zone());
}
return write_barrier_kind;
}
bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
// Safe loads do not need poisoning.
if (load_sensitivity == LoadSensitivity::kSafe) return false;
switch (poisoning_level_) {
case PoisoningMitigationLevel::kDontPoison:
return false;
case PoisoningMitigationLevel::kPoisonAll:
return true;
case PoisoningMitigationLevel::kPoisonCriticalOnly:
return load_sensitivity == LoadSensitivity::kCritical;
}
UNREACHABLE();
}
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(nullptr) {
node_ids_.insert(node->id());
}
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Node* size, Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(size) {
node_ids_.insert(node->id());
}
void MemoryLowering::AllocationGroup::Add(Node* node) {
node_ids_.insert(node->id());
}
bool MemoryLowering::AllocationGroup::Contains(Node* node) const {
// Additions should stay within the same allocated object, so it's safe to
// ignore them.
while (node_ids_.find(node->id()) == node_ids_.end()) {
switch (node->opcode()) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
node = NodeProperties::GetValueInput(node, 0);
break;
default:
return false;
}
}
return true;
}
MemoryLowering::AllocationState::AllocationState()
: group_(nullptr),
size_(std::numeric_limits<int>::max()),
top_(nullptr),
effect_(nullptr) {}
MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
Node* effect)
: group_(group),
size_(std::numeric_limits<int>::max()),
top_(nullptr),
effect_(effect) {}
MemoryLowering::AllocationState::AllocationState(AllocationGroup* group,
intptr_t size, Node* top,
Node* effect)
: group_(group), size_(size), top_(top), effect_(effect) {}
bool MemoryLowering::AllocationState::IsYoungGenerationAllocation() const {
return group() && group()->IsYoungGenerationAllocation();
}
Graph* MemoryLowering::graph() const { return jsgraph()->graph(); }
Isolate* MemoryLowering::isolate() const { return jsgraph()->isolate(); }
CommonOperatorBuilder* MemoryLowering::common() const {
return jsgraph()->common();
}
MachineOperatorBuilder* MemoryLowering::machine() const {
return jsgraph()->machine();
}
} // namespace compiler
} // namespace internal
} // namespace v8
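For orientation, the fast path that ReduceAllocateRaw emits in the non-folding case boils down to bump-pointer allocation with a runtime fallback. The following stand-alone C++ sketch mirrors that logic; it is not V8 code, and the names here are invented for illustration only.

// Plain-C++ illustration of the emitted bump-pointer fast path; `top` and
// `limit` stand in for the words loaded from top_address/limit_address, and
// kHeapObjectTag matches V8's tagging of heap object pointers.
#include <cstdint>

static constexpr intptr_t kHeapObjectTag = 1;

intptr_t BumpPointerAllocate(intptr_t& top, intptr_t limit, intptr_t size,
                             intptr_t (*call_runtime)(intptr_t)) {
  intptr_t new_top = top + size;
  if (static_cast<uintptr_t>(new_top) < static_cast<uintptr_t>(limit)) {
    top = new_top;                             // write back the new top
    return (new_top - size) + kHeapObjectTag;  // tagged pointer to the object
  }
  return call_runtime(size);                   // GC / builtin slow path
}

The real lowering adds a size check against kMaxRegularHeapObjectSize when large objects are allowed, and the folded-allocation path reserves space for the whole group up front instead of checking per object.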

src/compiler/memory-lowering.h (new file)

@@ -0,0 +1,136 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_MEMORY_LOWERING_H_
#define V8_COMPILER_MEMORY_LOWERING_H_
#include "src/compiler/graph-assembler.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
struct ElementAccess;
class Graph;
class JSGraph;
class MachineOperatorBuilder;
class Node;
class Operator;
// Provides operations to lower all simplified memory access and allocation
// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
// operators.
class MemoryLowering final : public Reducer {
public:
enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
class AllocationGroup;
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
static AllocationState const* Empty(Zone* zone) {
return new (zone) AllocationState();
}
static AllocationState const* Closed(AllocationGroup* group, Node* effect,
Zone* zone) {
return new (zone) AllocationState(group, effect);
}
static AllocationState const* Open(AllocationGroup* group, intptr_t size,
Node* top, Node* effect, Zone* zone) {
return new (zone) AllocationState(group, size, top, effect);
}
bool IsYoungGenerationAllocation() const;
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
Node* effect() const { return effect_; }
intptr_t size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group, Node* effect);
AllocationState(AllocationGroup* group, intptr_t size, Node* top,
Node* effect);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
intptr_t const size_;
Node* const top_;
Node* const effect_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
using WriteBarrierAssertFailedCallback = std::function<void(
Node* node, Node* object, const char* name, Zone* temp_zone)>;
MemoryLowering(
JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
Zone*) { UNREACHABLE(); },
const char* function_debug_name = nullptr);
~MemoryLowering() = default;
const char* reducer_name() const override { return "MemoryReducer"; }
// Perform memory lowering reduction on the given Node.
Reduction Reduce(Node* node) override;
// Specific reducers for each optype to enable keeping track of
// AllocationState by the MemoryOptimizer.
Reduction ReduceAllocateRaw(Node* node, AllocationType allocation_type,
AllowLargeObjects allow_large_objects,
AllocationState const** state);
Reduction ReduceLoadFromObject(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreToObject(Node* node,
AllocationState const* state = nullptr);
Reduction ReduceStoreElement(Node* node,
AllocationState const* state = nullptr);
Reduction ReduceStoreField(Node* node,
AllocationState const* state = nullptr);
Reduction ReduceStore(Node* node, AllocationState const* state = nullptr);
private:
Reduction ReduceAllocateRaw(Node* node);
WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
Node* value,
AllocationState const* state,
WriteBarrierKind);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
Graph* graph() const;
Isolate* isolate() const;
Zone* zone() const { return zone_; }
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
GraphAssembler* gasm() { return &graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
JSGraph* const jsgraph_;
Zone* zone_;
GraphAssembler graph_assembler_;
AllocationFolding allocation_folding_;
PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryLowering);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_MEMORY_LOWERING_H_
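To make the AllocationState bookkeeping above concrete: an "Open" state carries the combined size of the current allocation group, and ReduceAllocateRaw folds a new allocation into it only while that sum stays within the regular-object limit, patching the group's shared reservation constant each time. A small self-contained sketch of that arithmetic follows (simplified types and limit, not V8 code).

#include <cassert>
#include <cstdint>

// Stand-in for kMaxRegularHeapObjectSize; the real constant differs.
constexpr intptr_t kRegularObjectLimit = 256 * 1024;

struct OpenState {
  intptr_t reserved = 0;  // combined size of the folded allocation group
};

// Mirrors the folding guard in ReduceAllocateRaw: fold only if the group's
// total size stays within the regular-object limit, then bump the reservation
// (the real code patches the group's UniqueIntPtrConstant size node).
bool TryFold(OpenState& state, intptr_t object_size) {
  if (state.reserved > kRegularObjectLimit - object_size) return false;
  state.reserved += object_size;
  return true;
}

int main() {
  OpenState state;
  assert(TryFold(state, 16));  // reservation becomes 16
  assert(TryFold(state, 24));  // patched to 40
  assert(TryFold(state, 32));  // patched to 72: one GC check covers all three
  return 0;
}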

src/compiler/memory-optimizer.cc

@@ -11,90 +11,12 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
const char* function_debug_name,
TickCounter* tick_counter)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
allocation_folding_(allocation_folding),
function_debug_name_(function_debug_name),
tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
while (!tokens_.empty()) {
Token const token = tokens_.front();
tokens_.pop();
VisitNode(token.node, token.state);
}
DCHECK(pending_.empty());
DCHECK(tokens_.empty());
}
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(nullptr) {
node_ids_.insert(node->id());
}
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Node* size, Zone* zone)
: node_ids_(zone), allocation_(allocation), size_(size) {
node_ids_.insert(node->id());
}
void MemoryOptimizer::AllocationGroup::Add(Node* node) {
node_ids_.insert(node->id());
}
bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
// Additions should stay within the same allocated object, so it's safe to
// ignore them.
while (node_ids_.find(node->id()) == node_ids_.end()) {
switch (node->opcode()) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
node = NodeProperties::GetValueInput(node, 0);
break;
default:
return false;
}
}
return true;
}
MemoryOptimizer::AllocationState::AllocationState()
: group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
: group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
intptr_t size, Node* top)
: group_(group), size_(size), top_(top) {}
bool MemoryOptimizer::AllocationState::IsYoungGenerationAllocation() const {
return group() && group()->IsYoungGenerationAllocation();
}
namespace {
bool CanAllocate(const Node* node) {
@@ -221,470 +143,6 @@ Node* EffectPhiForPhi(Node* phi) {
return nullptr;
}
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
tick_counter_->DoTick();
DCHECK(!node->IsDead());
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
case IrOpcode::kAllocate:
// Allocate nodes were purged from the graph in effect-control
// linearization.
UNREACHABLE();
case IrOpcode::kAllocateRaw:
return VisitAllocateRaw(node, state);
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kLoadFromObject:
return VisitLoadFromObject(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
case IrOpcode::kStoreToObject:
return VisitStoreToObject(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
case IrOpcode::kStore:
return VisitStore(node, state);
default:
if (!CanAllocate(node)) {
// These operations cannot trigger GC.
return VisitOtherEffect(node, state);
}
}
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
#define __ gasm()->
bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
const Edge edge) {
if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
// In Pointer Compression we might have a Compress node between an
// AllocateRaw and the value used as input. This case is trickier since we
// have to check all of the Compress node edges to test for a StoreField.
for (Edge const new_edge : node->use_edges()) {
if (AllocationTypeNeedsUpdateToOld(new_edge.from(), new_edge)) {
return true;
}
}
// If we arrived here, we tested all the edges of the Compress node and
// didn't find it necessary to update the AllocationType.
return false;
}
// Test to see if we need to update the AllocationType.
if (node->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* parent = node->InputAt(0);
if (parent->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(parent->op()) == AllocationType::kOld) {
return true;
}
}
return false;
}
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
gasm()->Reset(effect, control);
const AllocateParameters& allocation = AllocateParametersOf(node->op());
AllocationType allocation_type = allocation.allocation_type();
// Propagate tenuring from outer allocations to inner allocations, i.e.
// when we allocate an object in old space and store a newly allocated
// child object into the pretenured object, then the newly allocated
// child object also should get pretenured to old space.
if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* child = user->InputAt(1);
// In Pointer Compression we might have a Compress node between an
// AllocateRaw and the value used as input. If so, we need to update
// child to point to the StoreField.
if (COMPRESS_POINTERS_BOOL &&
IrOpcode::IsCompressOpcode(child->opcode())) {
child = child->InputAt(0);
}
if (child->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(child->op()) == AllocationType::kYoung) {
NodeProperties::ChangeOp(child, node->op());
break;
}
}
}
} else {
DCHECK_EQ(AllocationType::kYoung, allocation_type);
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (AllocationTypeNeedsUpdateToOld(user, edge)) {
allocation_type = AllocationType::kOld;
break;
}
}
}
Node* allocate_builtin;
if (allocation_type == AllocationType::kYoung) {
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInYoungGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
}
} else {
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
allocate_builtin = __ AllocateInOldGenerationStubConstant();
} else {
allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
}
}
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
if (machine()->Is64()) {
if (OpParameter<int64_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int64Constant(state_size));
}
} else {
if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(
group->size(),
common()->Int32Constant(static_cast<int32_t>(state_size)));
}
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the effective inner allocated address.
value = __ BitcastWordToTagged(
__ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
// Extend the allocation {group}.
group->Add(value);
state = AllocationState::Open(group, state_size, top, zone());
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineType::PointerRepresentation());
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
__ Bind(&call_runtime);
{
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
__ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
__ Bind(&done);
// Compute the new top and write it back.
top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
// Compute the initial object address.
value = __ BitcastWordToTagged(
__ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
// Start a new allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, size, zone());
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
auto call_runtime = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
// Load allocation top and limit.
Node* top =
__ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
Node* limit =
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
__ GotoIfNot(
__ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
&call_runtime);
}
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
__ Goto(&done, __ BitcastWordToTagged(
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
// Create an unfoldable allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation_type, zone());
state = AllocationState::Closed(group, zone());
}
effect = __ ExtractCurrentEffect();
control = __ ExtractCurrentControl();
// Replace all effect uses of {node} with the {effect}, enqueue the
// effect uses for further processing, and replace all value uses of
// {node} with the {value}.
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
EnqueueUse(edge.from(), edge.index(), state);
edge.UpdateTo(effect);
} else if (NodeProperties::IsValueEdge(edge)) {
edge.UpdateTo(value);
} else {
DCHECK(NodeProperties::IsControlEdge(edge));
edge.UpdateTo(control);
}
}
// Kill the {node} to make sure we don't leave dangling dead uses.
node->Kill();
}
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
#undef __
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
state = empty_state();
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
representation.representation(), write_barrier_kind)));
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitOtherEffect(Node* node,
AllocationState const* state) {
EnqueueUses(node, state);
}
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
index = graph()->NewNode(machine()->WordShl(), index,
jsgraph()->IntPtrConstant(element_size_shift));
}
int const fixed_offset = access.header_size - access.tag();
if (fixed_offset) {
index = graph()->NewNode(machine()->IntAdd(), index,
jsgraph()->IntPtrConstant(fixed_offset));
}
return index;
}
namespace {
bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
while (true) {
switch (value->opcode()) {
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedToCompressedSigned:
return false;
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
case IrOpcode::kChangeTaggedToCompressed:
value = NodeProperties::GetValueInput(value, 0);
continue;
case IrOpcode::kHeapConstant: {
RootIndex root_index;
if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
&root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
return false;
}
break;
}
default:
break;
}
return true;
}
}
void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
Zone* temp_zone) {
std::stringstream str;
@@ -722,20 +180,199 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
Node* node, Node* object, Node* value, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
MemoryOptimizer::MemoryOptimizer(
JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: memory_lowering_(jsgraph, zone, poisoning_level, allocation_folding,
WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
while (!tokens_.empty()) {
Token const token = tokens_.front();
tokens_.pop();
VisitNode(token.node, token.state);
}
if (!ValueNeedsWriteBarrier(value, isolate())) {
write_barrier_kind = kNoWriteBarrier;
DCHECK(pending_.empty());
DCHECK(tokens_.empty());
}
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
tick_counter_->DoTick();
DCHECK(!node->IsDead());
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
case IrOpcode::kAllocate:
// Allocate nodes were purged from the graph in effect-control
// linearization.
UNREACHABLE();
case IrOpcode::kAllocateRaw:
return VisitAllocateRaw(node, state);
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kLoadFromObject:
return VisitLoadFromObject(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
case IrOpcode::kStoreToObject:
return VisitStoreToObject(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
case IrOpcode::kStore:
return VisitStore(node, state);
default:
if (!CanAllocate(node)) {
// These operations cannot trigger GC.
return VisitOtherEffect(node, state);
}
}
if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
const Edge edge) {
if (COMPRESS_POINTERS_BOOL && IrOpcode::IsCompressOpcode(node->opcode())) {
// In Pointer Compression we might have a Compress node between an
// AllocateRaw and the value used as input. This case is trickier since we
// have to check all of the Compress node edges to test for a StoreField.
for (Edge const new_edge : node->use_edges()) {
if (AllocationTypeNeedsUpdateToOld(new_edge.from(), new_edge)) {
return true;
}
}
// If we arrived here, we tested all the edges of the Compress node and
// didn't find it necessary to update the AllocationType.
return false;
}
return write_barrier_kind;
// Test to see if we need to update the AllocationType.
if (node->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* parent = node->InputAt(0);
if (parent->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(parent->op()) == AllocationType::kOld) {
return true;
}
}
return false;
}
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
const AllocateParameters& allocation = AllocateParametersOf(node->op());
AllocationType allocation_type = allocation.allocation_type();
// Propagate tenuring from outer allocations to inner allocations, i.e.
// when we allocate an object in old space and store a newly allocated
// child object into the pretenured object, then the newly allocated
// child object also should get pretenured to old space.
if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
Node* child = user->InputAt(1);
// In Pointer Compression we might have a Compress node between an
// AllocateRaw and the value used as input. If so, we need to update
// child to point to the StoreField.
if (COMPRESS_POINTERS_BOOL &&
IrOpcode::IsCompressOpcode(child->opcode())) {
child = child->InputAt(0);
}
if (child->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(child->op()) == AllocationType::kYoung) {
NodeProperties::ChangeOp(child, node->op());
break;
}
}
}
} else {
DCHECK_EQ(AllocationType::kYoung, allocation_type);
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (AllocationTypeNeedsUpdateToOld(user, edge)) {
allocation_type = AllocationType::kOld;
break;
}
}
}
memory_lowering()->ReduceAllocateRaw(
node, allocation_type, allocation.allow_large_objects(), &state);
EnqueueUses(state->effect(), state);
}
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
memory_lowering()->ReduceStoreToObject(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
memory_lowering()->ReduceLoadElement(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
memory_lowering()->ReduceLoadField(node);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
memory_lowering()->ReduceStoreElement(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
memory_lowering()->ReduceStoreField(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
memory_lowering()->ReduceStore(node, state);
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
state = empty_state();
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitOtherEffect(Node* node,
AllocationState const* state) {
EnqueueUses(node, state);
}
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
@@ -743,7 +380,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
// Check if all states are the same; or at least if all allocation
// states belong to the same allocation group.
AllocationState const* state = states.front();
AllocationGroup* group = state->group();
MemoryLowering::AllocationGroup* group = state->group();
for (size_t i = 1; i < states.size(); ++i) {
if (states[i] != state) state = nullptr;
if (states[i]->group() != group) group = nullptr;
@@ -755,7 +392,7 @@ MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
// TODO(bmeurer): We could potentially just create a Phi here to merge
// the various tops; but we need to pay special attention not to create
// an unschedulable graph.
state = AllocationState::Closed(group, zone());
state = AllocationState::Closed(group, nullptr, zone());
} else {
// The states are from different allocation groups.
state = empty_state();
@@ -830,31 +467,6 @@ void MemoryOptimizer::EnqueueUse(Node* node, int index,
Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
CommonOperatorBuilder* MemoryOptimizer::common() const {
return jsgraph()->common();
}
MachineOperatorBuilder* MemoryOptimizer::machine() const {
return jsgraph()->machine();
}
bool MemoryOptimizer::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
// Safe loads do not need poisoning.
if (load_sensitivity == LoadSensitivity::kSafe) return false;
switch (poisoning_level_) {
case PoisoningMitigationLevel::kDontPoison:
return false;
case PoisoningMitigationLevel::kPoisonAll:
return true;
case PoisoningMitigationLevel::kPoisonCriticalOnly:
return load_sensitivity == LoadSensitivity::kCritical;
}
UNREACHABLE();
}
} // namespace compiler
} // namespace internal
} // namespace v8
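The net effect of the refactoring in this file: MemoryOptimizer keeps only the effect-chain walk and AllocationState propagation, while every per-node rewrite is delegated to MemoryLowering. Conceptually the remaining walk is a worklist of (node, state) tokens, roughly as sketched below with simplified stand-in types, not the actual V8 classes.

#include <queue>

struct Node;             // stands in for compiler::Node
struct AllocationState;  // stands in for MemoryLowering::AllocationState

struct Token {
  Node* node;
  const AllocationState* state;
};

// Skeleton of the Optimize()/VisitNode/EnqueueUses loop: visit() lowers the
// node via MemoryLowering and pushes its effect uses with the updated state.
template <typename VisitFn>
void RunEffectChainWalk(Node* start, const AllocationState* empty_state,
                        VisitFn visit) {
  std::queue<Token> tokens;
  tokens.push({start, empty_state});
  while (!tokens.empty()) {
    Token token = tokens.front();
    tokens.pop();
    visit(token.node, token.state, &tokens);
  }
}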

src/compiler/memory-optimizer.h

@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
#include "src/compiler/graph-assembler.h"
#include "src/compiler/memory-lowering.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -15,95 +15,29 @@ class TickCounter;
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
struct ElementAccess;
class Graph;
class JSGraph;
class MachineOperatorBuilder;
class Node;
class Operator;
class Graph;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
// Lowers all simplified memory access and allocation related nodes (i.e.
// Allocate, LoadField, StoreField and friends) to machine operators.
// Performs allocation folding and store write barrier elimination
// implicitly.
// implicitly, while lowering all simplified memory access and allocation
// related nodes (i.e. Allocate, LoadField, StoreField and friends) to machine
// operators.
class MemoryOptimizer final {
public:
enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
private:
// An allocation group represents a set of allocations that have been folded
// together.
class AllocationGroup final : public ZoneObject {
public:
AllocationGroup(Node* node, AllocationType allocation, Zone* zone);
AllocationGroup(Node* node, AllocationType allocation, Node* size,
Zone* zone);
~AllocationGroup() = default;
void Add(Node* object);
bool Contains(Node* object) const;
bool IsYoungGenerationAllocation() const {
return allocation() == AllocationType::kYoung;
}
AllocationType allocation() const { return allocation_; }
Node* size() const { return size_; }
private:
ZoneSet<NodeId> node_ids_;
AllocationType const allocation_;
Node* const size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
static AllocationState const* Empty(Zone* zone) {
return new (zone) AllocationState();
}
static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
return new (zone) AllocationState(group);
}
static AllocationState const* Open(AllocationGroup* group, intptr_t size,
Node* top, Zone* zone) {
return new (zone) AllocationState(group, size, top);
}
bool IsYoungGenerationAllocation() const;
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
intptr_t size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group);
AllocationState(AllocationGroup* group, intptr_t size, Node* top);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
intptr_t const size_;
Node* const top_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
using AllocationState = MemoryLowering::AllocationState;
// An array of allocation states used to collect states on merges.
using AllocationStates = ZoneVector<AllocationState const*>;
@@ -127,44 +61,29 @@ class MemoryOptimizer final {
void VisitStore(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
Node* ComputeIndex(ElementAccess const&, Node*);
WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
Node* value,
AllocationState const* state,
WriteBarrierKind);
AllocationState const* MergeStates(AllocationStates const& states);
void EnqueueMerge(Node*, int, AllocationState const*);
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
// Returns true if the AllocationType of the current AllocateRaw node that we
// are visiting needs to be updated to kOld, due to propagation of tenuring
// from outer to inner allocations.
bool AllocationTypeNeedsUpdateToOld(Node* const user, const Edge edge);
AllocationState const* empty_state() const { return empty_state_; }
MemoryLowering* memory_lowering() { return &memory_lowering_; }
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
GraphAssembler* gasm() { return &graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
JSGraph* const jsgraph_;
MemoryLowering memory_lowering_;
JSGraph* jsgraph_;
AllocationState const* const empty_state_;
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
GraphAssembler graph_assembler_;
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
const char* function_debug_name_;
TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);

src/compiler/pipeline.cc

@@ -1727,8 +1727,8 @@ struct MemoryOptimizationPhase {
MemoryOptimizer optimizer(
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}