[turbofan] Initial version of the new LoadElimination.

This adds a new optimization phase to the TurboFan pipeline, which walks
over the effect chain and tries to eliminate redundant loads (and even
some stores) of object fields. Element accesses are currently ignored,
but will probably need to be handled at some point as well. We also have
no special treatment yet for properly tracking object maps; that is
likewise on the list of follow-ups.

The implementation is currently pretty simple, and probably way too
inefficient. It's meant to be a proof-of-concept to iterate on.

R=jarin@chromium.org
BUG=v8:4930,v8:5141

Review-Url: https://codereview.chromium.org/2120253002
Cr-Commit-Position: refs/heads/master@{#37528}
bmeurer 2016-07-05 05:19:48 -07:00 committed by Commit bot
parent 65415ca795
commit d70dc1ace4
5 changed files with 488 additions and 122 deletions
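To make the idea concrete before the file diffs, here is a minimal
standalone sketch. This is not V8 code: the FakeNode struct, the ids and
the flat (object, field) table are all invented for illustration. It walks
a single effect chain the way the new analysis does, remembering the last
known value of each (object, field) pair, reusing it when a later LoadField
asks for the same field, and invalidating entries when a StoreField might
clobber them.

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Invented, heavily simplified stand-in for an effect-chain node; none of
// this is the real TurboFan IR.
struct FakeNode {
  std::string op;  // "LoadField" or "StoreField"
  int object;      // id of the object being accessed
  int field;       // field index (offset / kPointerSize in the real pass)
  int value;       // id of the stored value (stores only)
  int id;          // id of this node (a load provides its own id as value)
};

int main() {
  // A straight-line effect chain: two loads of the same field, with a store
  // to a *different* field in between.
  std::vector<FakeNode> chain = {
      {"LoadField", /*object*/ 1, /*field*/ 0, /*value*/ -1, /*id*/ 10},
      {"StoreField", /*object*/ 1, /*field*/ 1, /*value*/ 7, /*id*/ 11},
      {"LoadField", /*object*/ 1, /*field*/ 0, /*value*/ -1, /*id*/ 12},
  };

  // Abstract state: (object id, field index) -> id of the last known value.
  std::map<std::pair<int, int>, int> state;

  for (FakeNode const& node : chain) {
    auto const key = std::make_pair(node.object, node.field);
    if (node.op == "LoadField") {
      auto it = state.find(key);
      if (it != state.end()) {
        // Same object, same field, no intervening clobber: reuse the value.
        std::printf("#%d:LoadField is redundant, reuse #%d\n", node.id,
                    it->second);
      } else {
        state[key] = node.id;  // from now on the load itself provides the value
      }
    } else {  // StoreField
      // Forget this field for every object that might alias the store target;
      // this toy is maximally conservative and drops the field for all objects
      // (the real pass consults the allocation-based QueryAlias instead).
      for (auto it = state.begin(); it != state.end();) {
        if (it->first.second == node.field) {
          it = state.erase(it);
        } else {
          ++it;
        }
      }
      state[key] = node.value;  // ...and remember the value just stored.
    }
  }
  return 0;
}

The real pass generalizes this from a straight chain to the whole effect
graph: abstract states are propagated along effect edges, merged at
EffectPhi nodes, and a separate replacement pass then rewires the redundant
loads (see LoadEliminationAnalysis::Run in the first file below).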

src/compiler/load-elimination.cc

@@ -1,101 +1,442 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/load-elimination.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/types.h"
namespace v8 {
namespace internal {
namespace compiler {
-LoadElimination::~LoadElimination() {}
#ifdef DEBUG
#define TRACE(...) \
do { \
if (FLAG_trace_turbo_load_elimination) PrintF(__VA_ARGS__); \
} while (false)
#else
#define TRACE(...)
#endif
-Reduction LoadElimination::Reduce(Node* node) {
namespace {
const size_t kMaxTrackedFields = 16;
Node* ActualValue(Node* node) {
switch (node->opcode()) {
-case IrOpcode::kLoadField:
-return ReduceLoadField(node);
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckTaggedPointer:
case IrOpcode::kCheckTaggedSigned:
case IrOpcode::kFinishRegion:
return ActualValue(NodeProperties::GetValueInput(node, 0));
default:
-break;
return node;
}
-return NoChange();
}
-Reduction LoadElimination::ReduceLoadField(Node* node) {
-DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
-FieldAccess const access = FieldAccessOf(node->op());
-Node* object = NodeProperties::GetValueInput(node, 0);
-for (Node* effect = NodeProperties::GetEffectInput(node);;
-effect = NodeProperties::GetEffectInput(effect)) {
-switch (effect->opcode()) {
-case IrOpcode::kLoadField: {
-FieldAccess const effect_access = FieldAccessOf(effect->op());
-if (object == NodeProperties::GetValueInput(effect, 0) &&
-access == effect_access && effect_access.type->Is(access.type)) {
-Node* const value = effect;
-ReplaceWithValue(node, value);
-return Replace(value);
-}
enum Aliasing { kNoAlias, kMayAlias, kMustAlias };
Aliasing QueryAlias(Node* a, Node* b) {
if (a == b) return kMustAlias;
if (b->opcode() == IrOpcode::kAllocate) {
switch (a->opcode()) {
case IrOpcode::kAllocate:
case IrOpcode::kHeapConstant:
case IrOpcode::kParameter:
return kNoAlias;
default:
break;
}
}
if (a->opcode() == IrOpcode::kAllocate) {
switch (b->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kParameter:
return kNoAlias;
default:
break;
}
}
return kMayAlias;
}
bool MayAlias(Node* a, Node* b) { return QueryAlias(a, b) != kNoAlias; }
bool MustAlias(Node* a, Node* b) { return QueryAlias(a, b) == kMustAlias; }
// Abstract state to approximate the current state of a certain field along the
// effect paths through the graph.
class AbstractField final : public ZoneObject {
public:
explicit AbstractField(Zone* zone) : info_for_node_(zone) {}
AbstractField(Node* object, Node* value, Zone* zone) : info_for_node_(zone) {
info_for_node_.insert(std::make_pair(object, value));
}
AbstractField const* Extend(Node* object, Node* value, Zone* zone) const {
AbstractField* that = new (zone) AbstractField(zone);
that->info_for_node_ = this->info_for_node_;
that->info_for_node_.insert(std::make_pair(object, value));
return that;
}
Node* Lookup(Node* object) const {
for (auto pair : info_for_node_) {
if (MustAlias(object, pair.first)) return pair.second;
}
return nullptr;
}
AbstractField const* Kill(Node* object, Zone* zone) const {
for (auto pair : this->info_for_node_) {
if (MayAlias(object, pair.first)) {
AbstractField* that = new (zone) AbstractField(zone);
for (auto pair : this->info_for_node_) {
if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
}
return that;
}
-case IrOpcode::kStoreField: {
-if (access == FieldAccessOf(effect->op())) {
-if (object == NodeProperties::GetValueInput(effect, 0)) {
-Node* const value = NodeProperties::GetValueInput(effect, 1);
-Type* value_type = NodeProperties::GetType(value);
-Type* node_type = NodeProperties::GetType(node);
-// Make sure the replacement's type is a subtype of the node's
-// type. Otherwise we could confuse optimizations that were
-// based on the original type.
-if (value_type->Is(node_type)) {
-ReplaceWithValue(node, value);
-return Replace(value);
}
return this;
}
bool Equals(AbstractField const* that) const {
return this == that || this->info_for_node_ == that->info_for_node_;
}
AbstractField const* Merge(AbstractField const* that, Zone* zone) const {
if (this->Equals(that)) return this;
AbstractField* copy = new (zone) AbstractField(zone);
for (auto this_it : this->info_for_node_) {
Node* this_object = this_it.first;
Node* this_value = this_it.second;
auto that_it = that->info_for_node_.find(this_object);
if (that_it != that->info_for_node_.end() &&
that_it->second == this_value) {
copy->info_for_node_.insert(this_it);
}
}
return copy;
}
private:
ZoneMap<Node*, Node*> info_for_node_;
};
// Abstract state to track the state of all fields along the effect paths
// through the graph.
class AbstractState final : public ZoneObject {
public:
AbstractState() {
for (size_t i = 0; i < kMaxTrackedFields; ++i) fields_[i] = nullptr;
}
AbstractState const* Extend(Node* object, size_t index, Node* value,
Zone* zone) const {
AbstractState* that = new (zone) AbstractState(*this);
AbstractField const* that_field = that->fields_[index];
if (that_field) {
that_field = that_field->Extend(object, value, zone);
} else {
that_field = new (zone) AbstractField(object, value, zone);
}
that->fields_[index] = that_field;
return that;
}
AbstractState const* Kill(Node* object, size_t index, Zone* zone) const {
if (!this->fields_[index]) return this;
AbstractState* that = new (zone) AbstractState(*this);
that->fields_[index] = nullptr;
return that;
}
AbstractState const* Merge(AbstractState const* that, Zone* zone) const {
if (this->Equals(that)) return this;
AbstractState* copy = new (zone) AbstractState();
for (size_t i = 0; i < kMaxTrackedFields; ++i) {
AbstractField const* this_field = this->fields_[i];
AbstractField const* that_field = that->fields_[i];
if (this_field && that_field) {
copy->fields_[i] = this_field->Merge(that_field, zone);
}
}
return copy;
}
Node* Lookup(Node* object, size_t index) const {
AbstractField const* this_field = this->fields_[index];
if (this_field) return this_field->Lookup(object);
return nullptr;
}
bool Equals(AbstractState const* that) const {
if (this == that) return true;
for (size_t i = 0; i < kMaxTrackedFields; ++i) {
AbstractField const* this_field = this->fields_[i];
AbstractField const* that_field = that->fields_[i];
if (this_field) {
if (!that_field || !this_field->Equals(that_field)) return false;
} else if (that_field) {
return false;
}
DCHECK(this_field == that_field || this_field->Equals(that_field));
}
return true;
}
private:
AbstractField const* fields_[kMaxTrackedFields];
};
class LoadEliminationAnalysis final {
public:
LoadEliminationAnalysis(Graph* graph, Zone* zone)
: candidates_(zone),
empty_state_(),
queue_(zone),
node_states_(graph->NodeCount(), zone) {}
void Run(Node* start) {
TRACE("--{Analysis phase}--\n");
UpdateState(start, empty_state());
while (!queue_.empty()) {
Node* const node = queue_.top();
queue_.pop();
VisitNode(node);
}
TRACE("--{Replacement phase}--\n");
ZoneMap<Node*, Node*> replacements(zone());
for (Node* const node : candidates_) {
switch (node->opcode()) {
case IrOpcode::kLoadField: {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object =
ActualValue(NodeProperties::GetValueInput(node, 0));
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = GetState(effect);
int field_index = FieldIndexOf(access);
DCHECK_LE(0, field_index);
if (Node* value = state->Lookup(object, field_index)) {
auto it = replacements.find(value);
if (it != replacements.end()) value = it->second;
Type* const value_type = NodeProperties::GetType(value);
if (value_type->Is(access.type)) {
replacements.insert(std::make_pair(node, value));
TRACE(" - Replacing redundant #%d:LoadField with #%d:%s\n",
node->id(), value->id(), value->op()->mnemonic());
NodeProperties::ReplaceUses(node, value, effect);
node->Kill();
} else {
-// This LoadField has stronger guarantees than the stored value
-// can give us, which suggests that we are probably in unreachable
-// code, guarded by some Check, so don't bother trying to optimize
-// this LoadField {node}.
-return NoChange();
TRACE(
" - Cannot replace redundant #%d:LoadField with #%d:%s,"
" because types don't agree",
node->id(), value->id(), value->op()->mnemonic());
}
}
-// TODO(turbofan): Alias analysis to the rescue?
-return NoChange();
break;
}
-break;
-}
-case IrOpcode::kBeginRegion:
-case IrOpcode::kStoreBuffer:
-case IrOpcode::kStoreElement: {
-// These can never interfere with field loads.
-break;
-}
-case IrOpcode::kFinishRegion: {
-// "Look through" FinishRegion nodes to make LoadElimination capable
-// of looking into atomic regions.
-if (object == effect) object = NodeProperties::GetValueInput(effect, 0);
-break;
-}
-case IrOpcode::kAllocate: {
-// Allocations don't interfere with field loads. In case we see the
-// actual allocation for the {object} we can abort.
-if (object == effect) return NoChange();
-break;
-}
-default: {
-if (!effect->op()->HasProperty(Operator::kNoWrite) ||
-effect->op()->EffectInputCount() != 1) {
-return NoChange();
case IrOpcode::kStoreField: {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object =
ActualValue(NodeProperties::GetValueInput(node, 0));
Node* const value =
ActualValue(NodeProperties::GetValueInput(node, 1));
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = GetState(effect);
int field_index = FieldIndexOf(access);
DCHECK_LE(0, field_index);
if (value == state->Lookup(object, field_index)) {
TRACE(" - Killing redundant #%d:StoreField\n", node->id());
NodeProperties::ReplaceUses(node, value, effect);
node->Kill();
}
break;
}
-break;
default:
UNREACHABLE();
}
}
}
-UNREACHABLE();
-return NoChange();
private:
void VisitNode(Node* node) {
TRACE(" - Visiting node #%d:%s\n", node->id(), node->op()->mnemonic());
switch (node->opcode()) {
case IrOpcode::kEffectPhi:
return VisitEffectPhi(node);
case IrOpcode::kLoadField:
return VisitLoadField(node);
case IrOpcode::kStoreElement:
return VisitStoreElement(node);
case IrOpcode::kStoreField:
return VisitStoreField(node);
case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
case IrOpcode::kTerminate:
case IrOpcode::kThrow:
break;
default:
return VisitOtherNode(node);
}
}
void VisitEffectPhi(Node* node) {
int const input_count = node->InputCount() - 1;
DCHECK_LT(0, input_count);
Node* const control = NodeProperties::GetControlInput(node);
Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
AbstractState const* state = GetState(effect0);
if (state == nullptr) return;
if (control->opcode() == IrOpcode::kMerge) {
for (int i = 1; i < input_count; ++i) {
Node* const effecti = NodeProperties::GetEffectInput(node, i);
if (GetState(effecti) == nullptr) return;
}
}
for (int i = 1; i < input_count; ++i) {
Node* const effecti = NodeProperties::GetEffectInput(node, i);
if (AbstractState const* statei = GetState(effecti)) {
state = state->Merge(statei, zone());
}
}
UpdateState(node, state);
}
void VisitLoadField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object = ActualValue(NodeProperties::GetValueInput(node, 0));
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = GetState(effect);
int field_index = FieldIndexOf(access);
if (field_index >= 0) {
Node* const value = state->Lookup(object, field_index);
if (!value) {
TRACE(" Node #%d:LoadField is not redundant\n", node->id());
state = state->Extend(object, field_index, node, zone());
} else if (!NodeProperties::GetType(value)->Is(access.type)) {
TRACE(
" Node #%d:LoadField is redundant for #%d:%s, but"
" types don't agree\n",
node->id(), value->id(), value->op()->mnemonic());
state = state->Extend(object, field_index, node, zone());
} else if (value) {
TRACE(" Node #%d:LoadField is fully redundant for #%d:%s\n",
node->id(), value->id(), value->op()->mnemonic());
candidates_.insert(node);
}
} else {
TRACE(" Node #%d:LoadField is unsupported\n", node->id());
}
UpdateState(node, state);
}
void VisitStoreField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object = ActualValue(NodeProperties::GetValueInput(node, 0));
Node* const new_value = NodeProperties::GetValueInput(node, 1);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = GetState(effect);
int field_index = FieldIndexOf(access);
if (field_index >= 0) {
Node* const old_value = state->Lookup(object, field_index);
if (old_value == new_value) {
TRACE(" Node #%d:StoreField is fully redundant, storing #%d:%s\n",
node->id(), new_value->id(), new_value->op()->mnemonic());
candidates_.insert(node);
}
TRACE(" Killing all potentially aliasing stores for %d on #%d:%s\n",
field_index, object->id(), object->op()->mnemonic());
state = state->Kill(object, field_index, zone());
TRACE(" Node #%d:StoreField provides #%d:%s for %d on #%d:%s\n",
node->id(), new_value->id(), new_value->op()->mnemonic(),
field_index, object->id(), object->op()->mnemonic());
state = state->Extend(object, field_index, new_value, zone());
} else {
TRACE(" Node #%d:StoreField is unsupported\n", node->id());
state = empty_state();
}
UpdateState(node, state);
}
void VisitStoreElement(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = GetState(effect);
UpdateState(node, state);
}
void VisitOtherNode(Node* node) {
DCHECK_EQ(1, node->op()->EffectInputCount());
DCHECK_EQ(1, node->op()->EffectOutputCount());
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node->op()->HasProperty(Operator::kNoWrite)
? GetState(effect)
: empty_state();
UpdateState(node, state);
}
int FieldIndexOf(FieldAccess const& access) const {
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
UNREACHABLE();
break;
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat32:
return -1; // Currently untracked.
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
case MachineRepresentation::kTagged:
// TODO(bmeurer): Check that we never do overlapping load/stores of
// individual parts of Float64/Simd128 values.
break;
}
DCHECK_EQ(kTaggedBase, access.base_is_tagged);
DCHECK_EQ(0, access.offset % kPointerSize);
int field_index = access.offset / kPointerSize;
if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
return field_index;
}
AbstractState const* GetState(Node* node) const {
return node_states_[node->id()];
}
void SetState(Node* node, AbstractState const* state) {
node_states_[node->id()] = state;
}
void UpdateState(Node* node, AbstractState const* new_state) {
AbstractState const* old_state = GetState(node);
if (old_state && old_state->Equals(new_state)) return;
SetState(node, new_state);
EnqueueUses(node);
}
void EnqueueUses(Node* node) {
for (Edge const edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
queue_.push(edge.from());
}
}
}
AbstractState const* empty_state() const { return &empty_state_; }
Zone* zone() const { return node_states_.get_allocator().zone(); }
ZoneSet<Node*> candidates_;
AbstractState const empty_state_;
ZoneStack<Node*> queue_;
ZoneVector<AbstractState const*> node_states_;
DISALLOW_COPY_AND_ASSIGN(LoadEliminationAnalysis);
};
} // namespace
void LoadElimination::Run() {
LoadEliminationAnalysis analysis(graph(), zone());
analysis.Run(graph()->start());
}
} // namespace compiler
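An aside before the header diff: the part of the new analysis that is
easiest to get wrong is the join. At an EffectPhi the pass may only keep a
fact if it holds on every incoming effect path, which is why
AbstractField::Merge and AbstractState::Merge above intersect their tables.
Below is a minimal standalone model of that intersection; it is not V8 code
(plain std::map instead of the ZoneMap used above, invented object and
value ids).

#include <cassert>
#include <map>

// One tracked field, as in AbstractField: object node id -> value node id.
using FieldInfo = std::map<int, int>;

// Join of two effect paths: keep exactly the (object, value) facts on which
// both paths agree; one-sided or disagreeing facts are dropped.
FieldInfo Merge(FieldInfo const& left, FieldInfo const& right) {
  FieldInfo result;
  for (auto const& fact : left) {
    auto it = right.find(fact.first);
    if (it != right.end() && it->second == fact.second) result.insert(fact);
  }
  return result;
}

int main() {
  FieldInfo then_path = {{1, 100}, {2, 200}};  // facts after the then-branch
  FieldInfo else_path = {{1, 100}, {2, 201}};  // else-branch stored 201 into #2

  FieldInfo joined = Merge(then_path, else_path);
  assert(joined.size() == 1);   // only the fact about object #1 survives
  assert(joined.at(1) == 100);
  return 0;
}

Stores are the dual operation: AbstractField::Kill above walks the same kind
of table and drops every entry whose object may alias the stored-to object,
as decided by the allocation-based QueryAlias helper.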

src/compiler/load-elimination.h

@@ -1,36 +1,34 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_LOAD_ELIMINATION_H_
#define V8_COMPILER_LOAD_ELIMINATION_H_
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
// Forward declarations.
class Zone;
namespace compiler {
// Forward declarations.
class Graph;
-class SimplifiedOperatorBuilder;
-class LoadElimination final : public AdvancedReducer {
// Eliminates redundant loads via scalar replacement of aggregates.
class LoadElimination final {
public:
-explicit LoadElimination(Editor* editor, Graph* graph,
-SimplifiedOperatorBuilder* simplified)
-: AdvancedReducer(editor), graph_(graph), simplified_(simplified) {}
-~LoadElimination() final;
LoadElimination(Graph* graph, Zone* zone) : graph_(graph), zone_(zone) {}
-Reduction Reduce(Node* node) final;
void Run();
private:
-SimplifiedOperatorBuilder* simplified() const { return simplified_; }
Graph* graph() const { return graph_; }
-Reduction ReduceLoadField(Node* node);
Zone* zone() const { return zone_; }
Graph* const graph_;
-SimplifiedOperatorBuilder* const simplified_;
Zone* const zone_;
};
} // namespace compiler

src/compiler/pipeline.cc

@@ -872,8 +872,6 @@ struct TypedLoweringPhase {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
-LoadElimination load_elimination(&graph_reducer, data->graph(),
-data->jsgraph()->simplified());
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
MaybeHandle<LiteralsArray> literals_array =
data->info()->is_native_context_specializing()
@ -912,7 +910,6 @@ struct TypedLoweringPhase {
}
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
-AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
@@ -1043,6 +1040,22 @@ struct StoreStoreEliminationPhase {
}
};
struct LoadEliminationPhase {
static const char* phase_name() { return "load elimination"; }
void Run(PipelineData* data, Zone* temp_zone) {
// The memory optimizer requires the graphs to be trimmed, so trim now.
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
// Eliminate redundant loads.
LoadElimination load_elimination(data->graph(), temp_zone);
load_elimination.Run();
}
};
struct MemoryOptimizationPhase {
static const char* phase_name() { return "memory optimization"; }
@@ -1446,6 +1459,11 @@ bool PipelineImpl::CreateGraph() {
Run<EscapeAnalysisPhase>();
RunPrintAndVerify("Escape Analysed");
}
if (FLAG_turbo_load_elimination) {
Run<LoadEliminationPhase>();
RunPrintAndVerify("Load eliminated");
}
}
// Select representations. This has to run w/o the Typer decorator, because

src/flag-definitions.h

@@ -462,6 +462,9 @@ DEFINE_BOOL(native_context_specialization, true,
"enable native context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(turbo_load_elimination, true, "enable load elimination in TurboFan")
DEFINE_BOOL(trace_turbo_load_elimination, false,
"trace TurboFan load elimination")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,

test/unittests/compiler/load-elimination-unittest.cc

@@ -1,9 +1,10 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/access-builder.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -18,11 +19,9 @@ class LoadEliminationTest : public TypedGraphTest {
~LoadEliminationTest() override {}
protected:
-Reduction Reduce(Node* node) {
-// TODO(titzer): mock the GraphReducer here for better unit testing.
-GraphReducer graph_reducer(zone(), graph());
-LoadElimination reducer(&graph_reducer, graph(), simplified());
-return reducer.Reduce(node);
void Run() {
LoadElimination load_elimination(graph(), zone());
load_elimination.Run();
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
@@ -31,42 +30,49 @@ class LoadEliminationTest : public TypedGraphTest {
SimplifiedOperatorBuilder simplified_;
};
-TEST_F(LoadEliminationTest, LoadFieldWithStoreField) {
-Node* object1 = Parameter(Type::Any(), 0);
-Node* object2 = Parameter(Type::Any(), 1);
-Node* value = Parameter(Type::Any(), 2);
TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess access = {kTaggedBase,
kPointerSize,
MaybeHandle<Name>(),
Type::Any(),
MachineType::AnyTagged(),
kNoWriteBarrier};
Node* load1 = effect = graph()->NewNode(simplified()->LoadField(access),
object, effect, control);
Node* load2 = effect = graph()->NewNode(simplified()->LoadField(access),
object, effect, control);
control = graph()->NewNode(common()->Return(), load2, effect, control);
graph()->end()->ReplaceInput(0, control);
-FieldAccess access1 = AccessBuilder::ForContextSlot(42);
-Node* store1 = graph()->NewNode(simplified()->StoreField(access1), object1,
-value, effect, control);
-Reduction r1 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
-object1, store1, control));
-ASSERT_TRUE(r1.Changed());
-EXPECT_EQ(value, r1.replacement());
Run();
-FieldAccess access2 = AccessBuilder::ForMap();
-Node* store2 = graph()->NewNode(simplified()->StoreField(access2), object1,
-object2, store1, control);
-Reduction r2 = Reduce(graph()->NewNode(simplified()->LoadField(access2),
-object1, store2, control));
-ASSERT_TRUE(r2.Changed());
-EXPECT_EQ(object2, r2.replacement());
EXPECT_THAT(graph()->end(), IsEnd(IsReturn(load1, load1, graph()->start())));
}
-Node* store3 = graph()->NewNode(
-simplified()->StoreBuffer(BufferAccess(kExternalInt8Array)), object2,
-value, Int32Constant(10), object1, store2, control);
TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
Node* object = Parameter(Type::Any(), 0);
Node* value = Parameter(Type::Any(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
FieldAccess access = {kTaggedBase,
kPointerSize,
MaybeHandle<Name>(),
Type::Any(),
MachineType::AnyTagged(),
kNoWriteBarrier};
Node* store = effect = graph()->NewNode(simplified()->StoreField(access),
object, value, effect, control);
Node* load = effect = graph()->NewNode(simplified()->LoadField(access),
object, effect, control);
control = graph()->NewNode(common()->Return(), load, effect, control);
graph()->end()->ReplaceInput(0, control);
-Reduction r3 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
-object2, store3, control));
-ASSERT_FALSE(r3.Changed());
Run();
-Reduction r4 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
-object1, store3, control));
-ASSERT_TRUE(r4.Changed());
-EXPECT_EQ(value, r4.replacement());
EXPECT_THAT(graph()->end(), IsEnd(IsReturn(value, store, graph()->start())));
}
} // namespace compiler