[turbofan] Initial version of allocation folding and write barrier elimination.

This adds a new pass MemoryOptimizer that walks over the effect chain
from Start and lowers all Allocate, LoadField, StoreField, LoadElement,
and StoreElement nodes, trying to fold allocations into allocation
groups and eliminate write barriers on StoreField and StoreElement if
possible (i.e. if the object belongs to the current allocation group and
that group allocates in new space).

R=hpayer@chromium.org, jarin@chromium.org
BUG=v8:4931, chromium:580959
LOG=n

Review-Url: https://codereview.chromium.org/1963583004
Cr-Commit-Position: refs/heads/master@{#36128}
Author: bmeurer
Committed by: Commit bot
Date: 2016-05-10 03:11:06 -07:00
Commit: b8229ec446 (parent 7e8f248b9b)
31 changed files with 707 additions and 712 deletions
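
For orientation only, a minimal standalone C++ sketch of the idea (not V8 code; the BumpAllocator type and the object sizes are invented for illustration): allocations that end up in the same allocation group share a single bump-pointer reservation and limit check, and initializing stores into that freshly reserved new-space group can skip the write barrier.

// Standalone illustration only -- not V8 code; BumpAllocator and the sizes
// below are invented for this example.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct BumpAllocator {
  uint8_t* top;
  uint8_t* limit;

  // One reservation == one limit check; returns nullptr where V8 would
  // instead call into the runtime to collect garbage and retry.
  uint8_t* AllocateRaw(std::size_t size) {
    if (static_cast<std::size_t>(limit - top) < size) return nullptr;
    uint8_t* result = top;
    top += size;
    return result;
  }
};

int main() {
  static uint8_t new_space[1024];
  BumpAllocator heap{new_space, new_space + sizeof(new_space)};

  // Unfolded: two Allocate nodes, two limit checks.
  uint8_t* a = heap.AllocateRaw(16);
  uint8_t* b = heap.AllocateRaw(32);

  // Folded: one reservation sized for the whole allocation group; the
  // individual objects are then carved out with plain pointer arithmetic.
  // Stores that initialize c and d need no write barrier, because the whole
  // group is known to have just been allocated in new space.
  uint8_t* group = heap.AllocateRaw(16 + 32);
  uint8_t* c = group;
  uint8_t* d = group + 16;

  std::printf("%p %p %p %p\n", static_cast<void*>(a), static_cast<void*>(b),
              static_cast<void*>(c), static_cast<void*>(d));
  return 0;
}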


@@ -194,6 +194,7 @@ config("toolchain") {
"CAN_USE_NEON",
]
}
# TODO(jochen): Add support for arm_test_noprobe.
if (current_cpu != "arm") {
@@ -208,6 +209,7 @@ config("toolchain") {
if (v8_target_arch == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
# TODO(jochen): Add support for mips.
if (v8_target_arch == "mipsel") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
@@ -239,14 +241,17 @@ config("toolchain") {
} else if (mips_arch_variant == "r1") {
defines += [ "FPU_MODE_FP32" ]
}
# TODO(jochen): Add support for mips_arch_variant rx and loongson.
}
# TODO(jochen): Add support for mips64.
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
# TODO(jochen): Add support for big endian host byteorder.
defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
if (v8_use_mips_abi_hardfloat) {
@@ -835,8 +840,6 @@ source_set("v8_base") {
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
"src/compiler/c-linkage.cc",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-assembler.cc",
@@ -940,6 +943,8 @@ source_set("v8_base") {
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -2128,9 +2133,7 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
}
}
if ((current_toolchain == host_toolchain && v8_toolset_for_shell == "host") ||
    (current_toolchain == snapshot_toolchain && v8_toolset_for_shell == "host") ||
    (current_toolchain != host_toolchain && v8_toolset_for_shell == "target")) {
if ((current_toolchain == host_toolchain && v8_toolset_for_shell == "host") || (current_toolchain == snapshot_toolchain && v8_toolset_for_shell == "host") || (current_toolchain != host_toolchain && v8_toolset_for_shell == "target")) {
executable("shell") {
sources = [
"samples/shell.cc",


@@ -2667,11 +2667,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r1, r0, r2, r3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Push(r1);
__ Move(cp, Smi::FromInt(0));
@@ -2684,11 +2679,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r1, r0, r2, r3, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r1, r2);


@@ -2758,11 +2758,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(x1, x0, x2, x3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Push(x1);
__ Move(cp, Smi::FromInt(0));
@@ -2776,11 +2771,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(x1, x0, x2, x3, &runtime, PRETENURE);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(x1, x2);


@@ -499,13 +499,6 @@ Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
AllocateMutableHeapNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
Callable CodeFactory::Allocate##Type(Isolate* isolate) { \
Allocate##Type##Stub stub(isolate); \


@@ -130,7 +130,6 @@ class CodeFactory final {
static Callable FastNewStrictArguments(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable AllocateMutableHeapNumber(Isolate* isolate);
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
static Callable Allocate##Type(Isolate* isolate);
SIMD128_TYPES(SIMD128_ALLOC)


@@ -462,17 +462,6 @@ void AllocateHeapNumberStub::GenerateAssembly(
assembler->Return(result);
}
void AllocateMutableHeapNumberStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* result = assembler->Allocate(HeapNumber::kSize);
assembler->StoreMapNoWriteBarrier(
result,
assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
assembler->Return(result);
}
#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
const { \
@@ -4042,11 +4031,6 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
void AllocateMutableHeapNumberStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize();
}
#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Stub::InitializeDescriptor( \
CodeStubDescriptor* descriptor) { \


@@ -94,7 +94,6 @@ namespace internal {
V(LoadIC) \
/* TurboFanCodeStubs */ \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
V(AllocateFloat32x4) \
V(AllocateInt32x4) \
V(AllocateUint32x4) \
@@ -2743,18 +2742,6 @@ class AllocateHeapNumberStub : public TurboFanCodeStub {
DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
};
class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
public:
explicit AllocateMutableHeapNumberStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
};
#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
class Allocate##Type##Stub : public TurboFanCodeStub { \
public: \


@@ -1,186 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/change-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
ChangeLowering::~ChangeLowering() {}
Reduction ChangeLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kStoreField:
return ReduceStoreField(node);
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
case IrOpcode::kAllocate:
return ReduceAllocate(node);
default:
return NoChange();
}
UNREACHABLE();
return NoChange();
}
Reduction ChangeLowering::ReduceLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
return Changed(node);
}
Reduction ChangeLowering::ReduceStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
access.machine_type.representation(),
access.write_barrier_kind)));
return Changed(node);
}
Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
Node* const key) {
Node* index = key;
const int element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
index = graph()->NewNode(machine()->Word32Shl(), index,
jsgraph()->Int32Constant(element_size_shift));
}
const int fixed_offset = access.header_size - access.tag();
if (fixed_offset) {
index = graph()->NewNode(machine()->Int32Add(), index,
jsgraph()->Int32Constant(fixed_offset));
}
if (machine()->Is64()) {
// TODO(turbofan): This is probably only correct for typed arrays, and only
// if the typed arrays are at most 2GiB in size, which happens to match
// exactly our current situation.
index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
}
return index;
}
Reduction ChangeLowering::ReduceLoadElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
return Changed(node);
}
Reduction ChangeLowering::ReduceStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
access.machine_type.representation(),
access.write_barrier_kind)));
return Changed(node);
}
Reduction ChangeLowering::ReduceAllocate(Node* node) {
PretenureFlag const pretenure = OpParameter<PretenureFlag>(node->op());
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
if (machine()->Is64()) {
size = graph()->NewNode(machine()->ChangeInt32ToInt64(), size);
}
Node* top_address = jsgraph()->ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = jsgraph()->ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
Node* top = effect =
graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
jsgraph()->IntPtrConstant(0), effect, control);
Node* limit = effect =
graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
jsgraph()->IntPtrConstant(0), effect, control);
Node* new_top = graph()->NewNode(machine()->IntAdd(), top, size);
Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
Node* vtrue;
{
etrue = graph()->NewNode(
machine()->Store(StoreRepresentation(
MachineType::PointerRepresentation(), kNoWriteBarrier)),
top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
vtrue = graph()->NewNode(
machine()->BitcastWordToTagged(),
graph()->NewNode(machine()->IntAdd(), top,
jsgraph()->IntPtrConstant(kHeapObjectTag)));
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
{
Node* target = pretenure == NOT_TENURED
? jsgraph()->AllocateInNewSpaceStubConstant()
: jsgraph()->AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
CallDescriptor* descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
allocate_operator_.set(common()->Call(descriptor));
}
vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
efalse, if_false);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
Node* value = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
CommonOperatorBuilder* ChangeLowering::common() const {
return jsgraph()->common();
}
MachineOperatorBuilder* ChangeLowering::machine() const {
return jsgraph()->machine();
}
} // namespace compiler
} // namespace internal
} // namespace v8


@@ -1,52 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_CHANGE_LOWERING_H_
#define V8_COMPILER_CHANGE_LOWERING_H_
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
struct ElementAccess;
class JSGraph;
class Linkage;
class MachineOperatorBuilder;
class Operator;
class ChangeLowering final : public AdvancedReducer {
public:
ChangeLowering(Editor* editor, JSGraph* jsgraph)
: AdvancedReducer(editor), jsgraph_(jsgraph) {}
~ChangeLowering() final;
Reduction Reduce(Node* node) final;
private:
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
Reduction ReduceAllocate(Node* node);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
JSGraph* const jsgraph_;
SetOncePointer<const Operator> allocate_operator_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_CHANGE_LOWERING_H_


@@ -953,9 +953,21 @@ Node* JSCreateLowering::AllocateFastLiteral(
site_context->ExitScope(current_site, boilerplate_object);
} else if (property_details.representation().IsDouble()) {
// Allocate a mutable HeapNumber box and store the value into it.
value = effect = AllocateMutableHeapNumber(
    Handle<HeapNumber>::cast(boilerplate_value)->value(),
    effect, control);
effect = graph()->NewNode(common()->BeginRegion(), effect);
value = effect = graph()->NewNode(
    simplified()->Allocate(NOT_TENURED),
    jsgraph()->Constant(HeapNumber::kSize), effect, control);
effect = graph()->NewNode(
    simplified()->StoreField(AccessBuilder::ForMap()), value,
    jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
    effect, control);
effect = graph()->NewNode(
    simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
    value, jsgraph()->Constant(
               Handle<HeapNumber>::cast(boilerplate_value)->value()),
    effect, control);
value = effect =
    graph()->NewNode(common()->FinishRegion(), value, effect);
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
value = boilerplate_value->IsUninitialized()
@@ -1076,23 +1088,6 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
return builder.Finish();
}
Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect,
Node* control) {
// TODO(turbofan): Support inline allocation of MutableHeapNumber
// (requires proper alignment on Allocate, and Begin/FinishRegion).
Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoThrow);
Node* result = effect = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
jsgraph()->NoContextConstant(), effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
jsgraph()->Constant(value), effect, control);
return result;
}
MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
Node* node) {
Node* const closure = NodeProperties::GetValueInput(node, 0);


@@ -71,7 +71,6 @@ class JSCreateLowering final : public AdvancedReducer {
Handle<JSObject> boilerplate,
PretenureFlag pretenure,
AllocationSiteUsageContext* site_context);
Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control);
// Infers the LiteralsArray to use for a given {node}.
MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);


@@ -138,14 +138,14 @@ void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
void JSGenericLowering::LowerJSToBoolean(Node* node) {
Callable callable = CodeFactory::ToBoolean(isolate());
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
}
void JSGenericLowering::LowerJSTypeOf(Node* node) {
Callable callable = CodeFactory::Typeof(isolate());
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
}


@@ -312,19 +312,21 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
// Allocate a MutableHeapNumber for the new property.
Callable callable =
    CodeFactory::AllocateMutableHeapNumber(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
    isolate(), jsgraph()->zone(), callable.descriptor(), 0,
    CallDescriptor::kNoFlags, Operator::kNoThrow);
Node* this_box = this_effect = graph()->NewNode(
    common()->Call(desc),
    jsgraph()->HeapConstant(callable.code()),
    jsgraph()->NoContextConstant(), this_effect, this_control);
this_effect =
    graph()->NewNode(common()->BeginRegion(), this_effect);
Node* this_box = this_effect =
    graph()->NewNode(simplified()->Allocate(NOT_TENURED),
                     jsgraph()->Constant(HeapNumber::kSize),
                     this_effect, this_control);
this_effect = graph()->NewNode(
    simplified()->StoreField(AccessBuilder::ForMap()), this_box,
    jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
    this_effect, this_control);
this_effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
this_box, this_value, this_effect, this_control);
this_value = this_box;
this_value = this_effect = graph()->NewNode(
    common()->FinishRegion(), this_box, this_effect);
field_access.type = Type::TaggedPointer();
} else {


@@ -162,7 +162,9 @@ class CallDescriptor final : public ZoneObject {
kRestoreJSSP = 1u << 6,
kRestoreCSP = 1u << 7,
// Causes the code generator to initialize the root register.
kInitializeRootRegister = 1u << 8
kInitializeRootRegister = 1u << 8,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 9
};
typedef base::Flags<Flag> Flags;


@@ -0,0 +1,494 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
while (!tokens_.empty()) {
Token const token = tokens_.front();
tokens_.pop();
VisitNode(token.node, token.state);
}
DCHECK(pending_.empty());
DCHECK(tokens_.empty());
}
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
PretenureFlag pretenure,
Zone* zone)
: node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
node_ids_.insert(node->id());
}
MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
PretenureFlag pretenure,
Node* size, Zone* zone)
: node_ids_(zone), pretenure_(pretenure), size_(size) {
node_ids_.insert(node->id());
}
void MemoryOptimizer::AllocationGroup::Add(Node* node) {
node_ids_.insert(node->id());
}
bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
return node_ids_.find(node->id()) != node_ids_.end();
}
MemoryOptimizer::AllocationState::AllocationState()
: group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
: group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
int size, Node* top)
: group_(group), size_(size), top_(top) {}
bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
return group() && group()->IsNewSpaceAllocation();
}
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK(!node->IsDead());
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
case IrOpcode::kAllocate:
return VisitAllocate(node, state);
case IrOpcode::kCall:
return VisitCall(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kStore:
return VisitOtherEffect(node, state);
default:
break;
}
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
// Determine the top/limit addresses.
Node* top_address = jsgraph()->ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = jsgraph()->ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
Int32Matcher m(size);
if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) {
int32_t const object_size = m.Value();
if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size &&
state->group()->pretenure() == pretenure) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
int32_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
if (OpParameter<int32_t>(group->size()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int32Constant(state_size));
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
jsgraph()->IntPtrConstant(object_size));
effect = graph()->NewNode(
machine()->Store(StoreRepresentation(
MachineType::PointerRepresentation(), kNoWriteBarrier)),
top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
// Compute the effective inner allocated address.
value = graph()->NewNode(
machine()->BitcastWordToTagged(),
graph()->NewNode(machine()->IntAdd(), state->top(),
jsgraph()->IntPtrConstant(kHeapObjectTag)));
// Extend the allocation {group}.
group->Add(value);
state = AllocationState::Open(group, state_size, top, zone());
} else {
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
Node* size = graph()->NewNode(common()->Int32Constant(object_size));
// Load allocation top and limit.
Node* top = effect =
graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
jsgraph()->IntPtrConstant(0), effect, control);
Node* limit = effect = graph()->NewNode(
machine()->Load(MachineType::Pointer()), limit_address,
jsgraph()->IntPtrConstant(0), effect, control);
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
Node* check = graph()->NewNode(
machine()->UintLessThan(),
graph()->NewNode(
machine()->IntAdd(), top,
machine()->Is64()
? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
: size),
limit);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
Node* vtrue = top;
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
{
Node* target = pretenure == NOT_TENURED
? jsgraph()->AllocateInNewSpaceStubConstant()
: jsgraph()->AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
CallDescriptor* descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
allocate_operator_.set(common()->Call(descriptor));
}
vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
size, efalse, if_false);
vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
jsgraph()->IntPtrConstant(kHeapObjectTag));
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
value = graph()->NewNode(
common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
control);
// Compute the new top and write it back.
top = graph()->NewNode(machine()->IntAdd(), value,
jsgraph()->IntPtrConstant(object_size));
effect = graph()->NewNode(
machine()->Store(StoreRepresentation(
MachineType::PointerRepresentation(), kNoWriteBarrier)),
top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
// Compute the initial object address.
value = graph()->NewNode(
machine()->BitcastWordToTagged(),
graph()->NewNode(machine()->IntAdd(), value,
jsgraph()->IntPtrConstant(kHeapObjectTag)));
// Start a new allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, pretenure, size, zone());
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
// Load allocation top and limit.
Node* top = effect =
graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
jsgraph()->IntPtrConstant(0), effect, control);
Node* limit = effect =
graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
jsgraph()->IntPtrConstant(0), effect, control);
// Compute the new top.
Node* new_top = graph()->NewNode(
machine()->IntAdd(), top,
machine()->Is64()
? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
: size);
// Check if we can do bump pointer allocation here.
Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
Node* vtrue;
{
etrue = graph()->NewNode(
machine()->Store(StoreRepresentation(
MachineType::PointerRepresentation(), kNoWriteBarrier)),
top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
vtrue = graph()->NewNode(
machine()->BitcastWordToTagged(),
graph()->NewNode(machine()->IntAdd(), top,
jsgraph()->IntPtrConstant(kHeapObjectTag)));
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
{
Node* target = pretenure == NOT_TENURED
? jsgraph()->AllocateInNewSpaceStubConstant()
: jsgraph()->AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
CallDescriptor* descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
allocate_operator_.set(common()->Call(descriptor));
}
vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
efalse, if_false);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, control);
// Create an unfoldable allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, pretenure, zone());
state = AllocationState::Closed(group, zone());
}
// Replace all effect uses of {node} with the {effect}, enqueue the
// effect uses for further processing, and replace all value uses of
// {node} with the {value}.
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
EnqueueUse(edge.from(), edge.index(), state);
edge.UpdateTo(effect);
} else {
DCHECK(NodeProperties::IsValueEdge(edge));
edge.UpdateTo(value);
}
}
// Kill the {node} to make sure we don't leave dangling dead uses.
node->Kill();
}
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
state = empty_state();
}
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreElement(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
WriteBarrierKind write_barrier_kind =
ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitStoreField(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
WriteBarrierKind write_barrier_kind =
ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
EnqueueUses(node, state);
}
void MemoryOptimizer::VisitOtherEffect(Node* node,
AllocationState const* state) {
EnqueueUses(node, state);
}
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
Node* index = key;
int element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
index = graph()->NewNode(machine()->Word32Shl(), index,
jsgraph()->Int32Constant(element_size_shift));
}
const int fixed_offset = access.header_size - access.tag();
if (fixed_offset) {
index = graph()->NewNode(machine()->Int32Add(), index,
jsgraph()->Int32Constant(fixed_offset));
}
if (machine()->Is64()) {
// TODO(turbofan): This is probably only correct for typed arrays, and only
// if the typed arrays are at most 2GiB in size, which happens to match
// exactly our current situation.
index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
}
return index;
}
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
Node* object, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
return write_barrier_kind;
}
MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
AllocationStates const& states) {
// Check if all states are the same; or at least if all allocation
// states belong to the same allocation group.
AllocationState const* state = states.front();
AllocationGroup* group = state->group();
for (size_t i = 1; i < states.size(); ++i) {
if (states[i] != state) state = nullptr;
if (states[i]->group() != group) group = nullptr;
}
if (state == nullptr) {
if (group != nullptr) {
// We cannot fold any more allocations into this group, but we can still
// eliminate write barriers on stores to this group.
// TODO(bmeurer): We could potentially just create a Phi here to merge
// the various tops; but we need to pay special attention not to create
// an unschedulable graph.
state = AllocationState::Closed(group, zone());
} else {
// The states are from different allocation groups.
state = empty_state();
}
}
return state;
}
void MemoryOptimizer::EnqueueMerge(Node* node, int index,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
int const input_count = node->InputCount() - 1;
DCHECK_LT(0, input_count);
Node* const control = node->InputAt(input_count);
if (control->opcode() == IrOpcode::kLoop) {
// For loops we always start with an empty state at the beginning.
if (index == 0) EnqueueUses(node, empty_state());
} else {
DCHECK_EQ(IrOpcode::kMerge, control->opcode());
// Check if we already know about this pending merge.
NodeId const id = node->id();
auto it = pending_.find(id);
if (it == pending_.end()) {
// Insert a new pending merge.
it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
}
// Add the next input state.
it->second.push_back(state);
// Check if states for all inputs are available by now.
if (it->second.size() == static_cast<size_t>(input_count)) {
// All inputs to this effect merge are done, merge the states given all
// input constraints, drop the pending merge and enqueue uses of the
// EffectPhi {node}.
state = MergeStates(it->second);
EnqueueUses(node, state);
pending_.erase(it);
}
}
}
void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
for (Edge const edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
EnqueueUse(edge.from(), edge.index(), state);
}
}
}
void MemoryOptimizer::EnqueueUse(Node* node, int index,
AllocationState const* state) {
if (node->opcode() == IrOpcode::kEffectPhi) {
// An EffectPhi represents a merge of different effect chains, which
// needs special handling depending on whether the merge is part of a
// loop or just a normal control join.
EnqueueMerge(node, index, state);
} else {
Token token = {node, state};
tokens_.push(token);
}
}
Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
CommonOperatorBuilder* MemoryOptimizer::common() const {
return jsgraph()->common();
}
MachineOperatorBuilder* MemoryOptimizer::machine() const {
return jsgraph()->machine();
}
} // namespace compiler
} // namespace internal
} // namespace v8


@@ -0,0 +1,149 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
struct ElementAccess;
class Graph;
class JSGraph;
class MachineOperatorBuilder;
class Node;
class Operator;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
typedef uint32_t NodeId;
// Lowers all simplified memory access and allocation related nodes (i.e.
// Allocate, LoadField, StoreField and friends) to machine operators.
// Performs allocation folding and store write barrier elimination
// implicitly.
class MemoryOptimizer final {
public:
MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
~MemoryOptimizer() {}
void Optimize();
private:
// An allocation group represents a set of allocations that have been folded
// together.
class AllocationGroup final : public ZoneObject {
public:
AllocationGroup(Node* node, PretenureFlag pretenure, Zone* zone);
AllocationGroup(Node* node, PretenureFlag pretenure, Node* size,
Zone* zone);
~AllocationGroup() {}
void Add(Node* object);
bool Contains(Node* object) const;
bool IsNewSpaceAllocation() const { return pretenure() == NOT_TENURED; }
PretenureFlag pretenure() const { return pretenure_; }
Node* size() const { return size_; }
private:
ZoneSet<NodeId> node_ids_;
PretenureFlag const pretenure_;
Node* const size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
static AllocationState const* Empty(Zone* zone) {
return new (zone) AllocationState();
}
static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
return new (zone) AllocationState(group);
}
static AllocationState const* Open(AllocationGroup* group, int size,
Node* top, Zone* zone) {
return new (zone) AllocationState(group, size, top);
}
bool IsNewSpaceAllocation() const;
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
int size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group);
AllocationState(AllocationGroup* group, int size, Node* top);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
int const size_;
Node* const top_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
// An array of allocation states used to collect states on merges.
typedef ZoneVector<AllocationState const*> AllocationStates;
// We thread through tokens to represent the current state on a given effect
// path through the graph.
struct Token {
Node* node;
AllocationState const* state;
};
void VisitNode(Node*, AllocationState const*);
void VisitAllocate(Node*, AllocationState const*);
void VisitCall(Node*, AllocationState const*);
void VisitLoadElement(Node*, AllocationState const*);
void VisitLoadField(Node*, AllocationState const*);
void VisitStoreElement(Node*, AllocationState const*);
void VisitStoreField(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
Node* ComputeIndex(ElementAccess const&, Node*);
WriteBarrierKind ComputeWriteBarrierKind(Node* object,
AllocationState const* state,
WriteBarrierKind);
AllocationState const* MergeStates(AllocationStates const& states);
void EnqueueMerge(Node*, int, AllocationState const*);
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
AllocationState const* empty_state() const { return empty_state_; }
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
SetOncePointer<const Operator> allocate_operator_;
JSGraph* const jsgraph_;
AllocationState const* const empty_state_;
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_MEMORY_OPTIMIZER_H_


@@ -14,7 +14,6 @@
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
@@ -46,6 +45,7 @@
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
@@ -1019,6 +1019,15 @@ struct EffectControlLinearizationPhase {
}
};
struct MemoryOptimizationPhase {
static const char* phase_name() { return "memory optimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
optimizer.Optimize();
}
};
struct LateOptimizationPhase {
static const char* phase_name() { return "late optimization"; }
@@ -1027,7 +1036,6 @@ struct LateOptimizationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
ValueNumberingReducer value_numbering(temp_zone);
ChangeLowering lowering(&graph_reducer, data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
@@ -1036,7 +1044,6 @@ struct LateOptimizationPhase {
TailCallOptimization tco(data->common(), data->graph());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &lowering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &select_lowering);
@@ -1458,6 +1465,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify("Control flow optimized", true);
}
// Optimize memory access and allocation operations.
Run<MemoryOptimizationPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Memory optimized", true);
// Lower changes that have been inserted before.
Run<LateOptimizationPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.


@@ -2473,7 +2473,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Keep the linear allocation area empty if requested to do so, just
// return area back to the free list instead.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes);
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking) {
@@ -2485,7 +2486,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
new_node_size - size_in_bytes - linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left > 0) {
} else if (bytes_left >= 0) {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,


@@ -2624,11 +2624,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- edx : requested object size (untagged)
// -- esp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
@@ -2643,11 +2638,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- edx : requested object size (untagged)
// -- esp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);


@@ -75,12 +75,6 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
}
void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void VoidDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);


@@ -47,7 +47,6 @@ class PlatformInterfaceDescriptor;
V(RegExpConstructResult) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
V(AllocateFloat32x4) \
V(AllocateInt32x4) \
V(AllocateUint32x4) \
@@ -582,12 +581,6 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(AllocateMutableHeapNumberDescriptor,
CallInterfaceDescriptor)
};
class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(


@@ -2742,11 +2742,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Move(cp, Smi::FromInt(0));
@@ -2759,11 +2754,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);


@@ -2730,11 +2730,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Move(cp, Smi::FromInt(0));
@@ -2747,11 +2742,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);


@@ -2740,11 +2740,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- r4 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r4, r3, r5, r6, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(r4);
__ Push(r4);
__ LoadSmiLiteral(cp, Smi::FromInt(0));
@@ -2757,11 +2752,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- r4 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r4, r3, r5, r6, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(r4);
__ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r4, r5);

@@ -2697,11 +2697,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // -- r3 : requested object size (untagged)
   // -- lr : return address
   // -----------------------------------
-  Label runtime;
-  __ Allocate(r3, r2, r4, r5, &runtime, NO_ALLOCATION_FLAGS);
-  __ Ret();
-
-  __ bind(&runtime);
   __ SmiTag(r3);
   __ Push(r3);
   __ LoadSmiLiteral(cp, Smi::FromInt(0));
@@ -2714,11 +2709,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
   // -- r3 : requested object size (untagged)
   // -- lr : return address
   // -----------------------------------
-  Label runtime;
-  __ Allocate(r3, r2, r4, r5, &runtime, PRETENURE);
-  __ Ret();
-
-  __ bind(&runtime);
   __ SmiTag(r3);
   __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
   __ Push(r3, r4);

@@ -502,8 +502,6 @@
   'compiler/bytecode-branch-analysis.h',
   'compiler/bytecode-graph-builder.cc',
   'compiler/bytecode-graph-builder.h',
-  'compiler/change-lowering.cc',
-  'compiler/change-lowering.h',
   'compiler/c-linkage.cc',
   'compiler/coalesced-live-ranges.cc',
   'compiler/coalesced-live-ranges.h',
@@ -609,6 +607,8 @@
   'compiler/machine-operator-reducer.h',
   'compiler/machine-operator.cc',
   'compiler/machine-operator.h',
+  'compiler/memory-optimizer.cc',
+  'compiler/memory-optimizer.h',
   'compiler/move-optimizer.cc',
   'compiler/move-optimizer.h',
   'compiler/node-aux-data.h',

@@ -2048,11 +2048,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // -- rdx : requested object size (untagged)
   // -- rsp[0] : return address
   // -----------------------------------
-  Label runtime;
-  __ Allocate(rdx, rax, rcx, rdi, &runtime, NO_ALLOCATION_FLAGS);
-  __ Ret();
-
-  __ bind(&runtime);
   __ Integer32ToSmi(rdx, rdx);
   __ PopReturnAddressTo(rcx);
   __ Push(rdx);
@@ -2067,11 +2062,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
   // -- rdx : requested object size (untagged)
   // -- rsp[0] : return address
   // -----------------------------------
-  Label runtime;
-  __ Allocate(rdx, rax, rcx, rdi, &runtime, PRETENURE);
-  __ Ret();
-
-  __ bind(&runtime);
   __ Integer32ToSmi(rdx, rdx);
   __ PopReturnAddressTo(rcx);
   __ Push(rdx);

@@ -2654,11 +2654,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // -- edx : requested object size (untagged)
   // -- esp[0] : return address
   // -----------------------------------
-  Label runtime;
-  __ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS);
-  __ Ret();
-
-  __ bind(&runtime);
   __ SmiTag(edx);
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
@@ -2673,11 +2668,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
   // -- edx : requested object size (untagged)
   // -- esp[0] : return address
   // -----------------------------------
-  Label runtime;
-  __ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE);
-  __ Ret();
-
-  __ bind(&runtime);
   __ SmiTag(edx);
   __ PopReturnAddressTo(ecx);
   __ Push(edx);
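The change is identical across every allocation builtin touched above: the stub's own inline fast path (the `Label runtime; __ Allocate(...); __ Ret();` sequence with its runtime fallback) is deleted, leaving only the call into the runtime. Since generated code now performs the bump-pointer fast path inline and only reaches these builtins after that fast path has failed, a second fast path inside the builtin would be redundant. As a rough illustration of the shape of the removed code only (this is not V8 code; BumpAllocator and SlowPathAllocate are hypothetical names for this sketch):

// Illustrative sketch: a bump-pointer fast path paired with a slow-path
// fallback, mirroring the pattern the removed builtin code implemented.
#include <cstddef>
#include <cstdint>
#include <cstdlib>

struct BumpAllocator {
  uint8_t* top;
  uint8_t* limit;

  void* Allocate(size_t size) {
    // Fast path: bump the allocation pointer while the request fits
    // (the role of "__ Allocate(...); __ Ret();" in the removed code).
    if (static_cast<size_t>(limit - top) >= size) {
      void* result = top;
      top += size;
      return result;
    }
    // Slow path: hand the request off elsewhere (the "__ bind(&runtime);"
    // tail that the builtins keep).
    return SlowPathAllocate(size);
  }

  void* SlowPathAllocate(size_t size) { return std::malloc(size); }
};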

@@ -6,11 +6,10 @@
 #include "src/ast/scopes.h"
 #include "src/compiler/access-builder.h"
-#include "src/compiler/change-lowering.h"
 #include "src/compiler/control-builders.h"
 #include "src/compiler/effect-control-linearizer.h"
-#include "src/compiler/graph-reducer.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/memory-optimizer.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/representation-change.h"
@@ -67,11 +66,8 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
     EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
     linearizer.Run();
 
-    GraphReducer reducer(this->zone(), this->graph());
-    ChangeLowering lowering(&reducer, &jsgraph);
-    reducer.AddReducer(&lowering);
-    reducer.ReduceGraph();
-    Verifier::Run(this->graph());
+    MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
+    memory_optimizer.Optimize();
   }
 
   void CheckNumberCall(double expected, double input) {
@@ -753,11 +749,8 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
     EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
     linearizer.Run();
 
-    GraphReducer reducer(this->zone(), this->graph());
-    ChangeLowering lowering(&reducer, &jsgraph);
-    reducer.AddReducer(&lowering);
-    reducer.ReduceGraph();
-    Verifier::Run(this->graph());
+    MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
+    memory_optimizer.Optimize();
   }
 
   // Inserts the node as the return value of the graph.
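In both cctest harnesses above, the lowering tail changes from building a GraphReducer with a ChangeLowering reducer (followed by an explicit Verifier::Run) to running the MemoryOptimizer directly after effect/control linearization. A compact restatement of that sequence, assuming the JSGraph, Schedule and Zone supplied by the surrounding test fixture (LowerForTesting is a hypothetical wrapper name, and the snippet is assumed to live in the v8::internal::compiler namespace):

#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/memory-optimizer.h"

// Hypothetical wrapper around the two calls the tests now make; jsgraph,
// schedule and zone come from the test fixture.
void LowerForTesting(JSGraph* jsgraph, Schedule* schedule, Zone* zone) {
  // Linearize effects and control first, so the scheduled graph is in the
  // form the memory optimizer operates on.
  EffectControlLinearizer linearizer(jsgraph, schedule, zone);
  linearizer.Run();
  // Then run the new memory optimizer over the linearized graph.
  MemoryOptimizer memory_optimizer(jsgraph, zone);
  memory_optimizer.Optimize();
}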

@@ -1,276 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/code-stubs.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
using testing::_;
using testing::AllOf;
using testing::BitEq;
using testing::Capture;
using testing::CaptureEq;
namespace v8 {
namespace internal {
namespace compiler {
class ChangeLoweringTest : public TypedGraphTest {
public:
ChangeLoweringTest() : simplified_(zone()) {}
virtual MachineRepresentation WordRepresentation() const = 0;
protected:
bool Is32() const {
return WordRepresentation() == MachineRepresentation::kWord32;
}
bool Is64() const {
return WordRepresentation() == MachineRepresentation::kWord64;
}
Reduction Reduce(Node* node) {
GraphReducer graph_reducer(zone(), graph());
MachineOperatorBuilder machine(zone(), WordRepresentation());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
&machine);
ChangeLowering reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
return IsCall(
_, IsHeapConstant(AllocateHeapNumberStub(isolate()).GetCode()),
IsNumberConstant(BitEq(0.0)), effect_matcher, control_matcher);
}
Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
IsSmiShiftBitsConstant())
: IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
}
Matcher<Node*> IsChangeSmiToInt32(const Matcher<Node*>& value_matcher) {
return Is64() ? IsTruncateInt64ToInt32(
IsWord64Sar(value_matcher, IsSmiShiftBitsConstant()))
: IsWord32Sar(value_matcher, IsSmiShiftBitsConstant());
}
Matcher<Node*> IsChangeUint32ToSmi(const Matcher<Node*>& value_matcher) {
return Is64() ? IsWord64Shl(IsChangeUint32ToUint64(value_matcher),
IsSmiShiftBitsConstant())
: IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
}
Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) {
return IsLoad(MachineType::Float64(), value_matcher,
IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
graph()->start(), control_matcher);
}
Matcher<Node*> IsIntPtrConstant(int value) {
return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
}
Matcher<Node*> IsSmiShiftBitsConstant() {
return IsIntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
: IsWord64Equal(lhs_matcher, rhs_matcher);
}
private:
SimplifiedOperatorBuilder simplified_;
};
// -----------------------------------------------------------------------------
// Common.
class ChangeLoweringCommonTest
: public ChangeLoweringTest,
public ::testing::WithParamInterface<MachineRepresentation> {
public:
~ChangeLoweringCommonTest() override {}
MachineRepresentation WordRepresentation() const final { return GetParam(); }
};
TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldSmi) {
FieldAccess access = {
kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
Type::Any(), MachineType::AnyTagged(), kNoWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* p1 = Parameter(Type::TaggedSigned());
Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
graph()->start(), graph()->start());
Reduction r = Reduce(store);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsStore(StoreRepresentation(MachineRepresentation::kTagged,
kNoWriteBarrier),
p0, IsIntPtrConstant(access.offset - access.tag()), p1,
graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldTagged) {
FieldAccess access = {
kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* p1 = Parameter(Type::Tagged());
Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
graph()->start(), graph()->start());
Reduction r = Reduce(store);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsStore(StoreRepresentation(MachineRepresentation::kTagged,
kFullWriteBarrier),
p0, IsIntPtrConstant(access.offset - access.tag()), p1,
graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, LoadField) {
FieldAccess access = {
kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
Type::Any(), MachineType::AnyTagged(), kNoWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* load = graph()->NewNode(simplified()->LoadField(access), p0,
graph()->start(), graph()->start());
Reduction r = Reduce(load);
ASSERT_TRUE(r.Changed());
Matcher<Node*> index_match = IsIntPtrConstant(access.offset - access.tag());
EXPECT_THAT(r.replacement(),
IsLoad(MachineType::AnyTagged(), p0,
IsIntPtrConstant(access.offset - access.tag()),
graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementTagged) {
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
MachineType::AnyTagged(), kFullWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* p1 = Parameter(Type::Signed32());
Node* p2 = Parameter(Type::Tagged());
Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
graph()->start(), graph()->start());
Reduction r = Reduce(store);
const int element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
ASSERT_TRUE(r.Changed());
Matcher<Node*> index_match =
IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
IsInt32Constant(access.header_size - access.tag()));
if (!Is32()) {
index_match = IsChangeUint32ToUint64(index_match);
}
EXPECT_THAT(r.replacement(),
IsStore(StoreRepresentation(MachineRepresentation::kTagged,
kFullWriteBarrier),
p0, index_match, p2, graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementUint8) {
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Signed32(), MachineType::Uint8(),
kNoWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* p1 = Parameter(Type::Signed32());
Node* p2 = Parameter(Type::Signed32());
Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
graph()->start(), graph()->start());
Reduction r = Reduce(store);
ASSERT_TRUE(r.Changed());
Matcher<Node*> index_match =
IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
if (!Is32()) {
index_match = IsChangeUint32ToUint64(index_match);
}
EXPECT_THAT(r.replacement(),
IsStore(StoreRepresentation(MachineRepresentation::kWord8,
kNoWriteBarrier),
p0, index_match, p2, graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementTagged) {
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
MachineType::AnyTagged(), kNoWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* p1 = Parameter(Type::Signed32());
Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
graph()->start(), graph()->start());
Reduction r = Reduce(load);
const int element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
ASSERT_TRUE(r.Changed());
Matcher<Node*> index_match =
IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
IsInt32Constant(access.header_size - access.tag()));
if (!Is32()) {
index_match = IsChangeUint32ToUint64(index_match);
}
EXPECT_THAT(r.replacement(), IsLoad(MachineType::AnyTagged(), p0, index_match,
graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementInt8) {
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Signed32(), MachineType::Int8(),
kNoWriteBarrier};
Node* p0 = Parameter(Type::TaggedPointer());
Node* p1 = Parameter(Type::Signed32());
Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
graph()->start(), graph()->start());
Reduction r = Reduce(load);
ASSERT_TRUE(r.Changed());
Matcher<Node*> index_match =
IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
if (!Is32()) {
index_match = IsChangeUint32ToUint64(index_match);
}
EXPECT_THAT(r.replacement(), IsLoad(MachineType::Int8(), p0, index_match,
graph()->start(), graph()->start()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, Allocate) {
Node* p0 = Parameter(Type::Signed32());
Node* alloc = graph()->NewNode(simplified()->Allocate(TENURED), p0,
graph()->start(), graph()->start());
Reduction r = Reduce(alloc);
// Only check that we lowered, but do not specify the exact form since
// this is subject to change.
ASSERT_TRUE(r.Changed());
}
} // namespace compiler
} // namespace internal
} // namespace v8

@@ -41,7 +41,6 @@
   'cancelable-tasks-unittest.cc',
   'char-predicates-unittest.cc',
   'compiler/branch-elimination-unittest.cc',
-  'compiler/change-lowering-unittest.cc',
   'compiler/coalesced-live-ranges-unittest.cc',
   'compiler/common-operator-reducer-unittest.cc',
   'compiler/common-operator-unittest.cc',