diff --git a/BUILD.gn b/BUILD.gn index 221a153b32..f817a36888 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -194,6 +194,7 @@ config("toolchain") { "CAN_USE_NEON", ] } + # TODO(jochen): Add support for arm_test_noprobe. if (current_cpu != "arm") { @@ -208,6 +209,7 @@ config("toolchain") { if (v8_target_arch == "arm64") { defines += [ "V8_TARGET_ARCH_ARM64" ] } + # TODO(jochen): Add support for mips. if (v8_target_arch == "mipsel") { defines += [ "V8_TARGET_ARCH_MIPS" ] @@ -239,14 +241,17 @@ config("toolchain") { } else if (mips_arch_variant == "r1") { defines += [ "FPU_MODE_FP32" ] } + # TODO(jochen): Add support for mips_arch_variant rx and loongson. } + # TODO(jochen): Add support for mips64. if (v8_target_arch == "mips64el") { defines += [ "V8_TARGET_ARCH_MIPS64" ] if (v8_can_use_fpu_instructions) { defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] } + # TODO(jochen): Add support for big endian host byteorder. defines += [ "V8_TARGET_ARCH_MIPS64_LE" ] if (v8_use_mips_abi_hardfloat) { @@ -835,8 +840,6 @@ source_set("v8_base") { "src/compiler/bytecode-graph-builder.cc", "src/compiler/bytecode-graph-builder.h", "src/compiler/c-linkage.cc", - "src/compiler/change-lowering.cc", - "src/compiler/change-lowering.h", "src/compiler/coalesced-live-ranges.cc", "src/compiler/coalesced-live-ranges.h", "src/compiler/code-assembler.cc", @@ -940,6 +943,8 @@ source_set("v8_base") { "src/compiler/machine-operator-reducer.h", "src/compiler/machine-operator.cc", "src/compiler/machine-operator.h", + "src/compiler/memory-optimizer.cc", + "src/compiler/memory-optimizer.h", "src/compiler/move-optimizer.cc", "src/compiler/move-optimizer.h", "src/compiler/node-aux-data.h", @@ -2128,9 +2133,7 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") || } } -if ((current_toolchain == host_toolchain && v8_toolset_for_shell == "host") || - (current_toolchain == snapshot_toolchain && v8_toolset_for_shell == "host") || - (current_toolchain != host_toolchain && v8_toolset_for_shell == "target")) { +if ((current_toolchain == host_toolchain && v8_toolset_for_shell == "host") || (current_toolchain == snapshot_toolchain && v8_toolset_for_shell == "host") || (current_toolchain != host_toolchain && v8_toolset_for_shell == "target")) { executable("shell") { sources = [ "samples/shell.cc", diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index 1f5481bed7..5b6025cf9e 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -2667,11 +2667,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- r1 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(r1, r0, r2, r3, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(r1); __ Push(r1); __ Move(cp, Smi::FromInt(0)); @@ -2684,11 +2679,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- r1 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(r1, r0, r2, r3, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(r1); __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE))); __ Push(r1, r2); diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc index d4b7604656..3f0a070df9 100644 --- a/src/arm64/builtins-arm64.cc +++ b/src/arm64/builtins-arm64.cc @@ -2758,11 +2758,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- x1 : requested object size (untagged) // -- lr : return 
address // ----------------------------------- - Label runtime; - __ Allocate(x1, x0, x2, x3, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ Bind(&runtime); __ SmiTag(x1); __ Push(x1); __ Move(cp, Smi::FromInt(0)); @@ -2776,11 +2771,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- x1 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(x1, x0, x2, x3, &runtime, PRETENURE); - __ Ret(); - - __ Bind(&runtime); __ SmiTag(x1); __ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE))); __ Push(x1, x2); diff --git a/src/code-factory.cc b/src/code-factory.cc index cfb6ca7a59..6041eec5d3 100644 --- a/src/code-factory.cc +++ b/src/code-factory.cc @@ -499,13 +499,6 @@ Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) { return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor()); } - -// static -Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) { - AllocateMutableHeapNumberStub stub(isolate); - return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor()); -} - #define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \ Callable CodeFactory::Allocate##Type(Isolate* isolate) { \ Allocate##Type##Stub stub(isolate); \ diff --git a/src/code-factory.h b/src/code-factory.h index 986298cc7b..68428dd032 100644 --- a/src/code-factory.h +++ b/src/code-factory.h @@ -130,7 +130,6 @@ class CodeFactory final { static Callable FastNewStrictArguments(Isolate* isolate); static Callable AllocateHeapNumber(Isolate* isolate); - static Callable AllocateMutableHeapNumber(Isolate* isolate); #define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \ static Callable Allocate##Type(Isolate* isolate); SIMD128_TYPES(SIMD128_ALLOC) diff --git a/src/code-stubs.cc b/src/code-stubs.cc index 5d7354a651..4834e70ed9 100644 --- a/src/code-stubs.cc +++ b/src/code-stubs.cc @@ -462,17 +462,6 @@ void AllocateHeapNumberStub::GenerateAssembly( assembler->Return(result); } -void AllocateMutableHeapNumberStub::GenerateAssembly( - CodeStubAssembler* assembler) const { - typedef compiler::Node Node; - - Node* result = assembler->Allocate(HeapNumber::kSize); - assembler->StoreMapNoWriteBarrier( - result, - assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map())); - assembler->Return(result); -} - #define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \ void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \ const { \ @@ -4042,11 +4031,6 @@ void AllocateHeapNumberStub::InitializeDescriptor( } -void AllocateMutableHeapNumberStub::InitializeDescriptor( - CodeStubDescriptor* descriptor) { - descriptor->Initialize(); -} - #define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \ void Allocate##Type##Stub::InitializeDescriptor( \ CodeStubDescriptor* descriptor) { \ diff --git a/src/code-stubs.h b/src/code-stubs.h index d0664a1ff1..707da746c1 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -94,7 +94,6 @@ namespace internal { V(LoadIC) \ /* TurboFanCodeStubs */ \ V(AllocateHeapNumber) \ - V(AllocateMutableHeapNumber) \ V(AllocateFloat32x4) \ V(AllocateInt32x4) \ V(AllocateUint32x4) \ @@ -2743,18 +2742,6 @@ class AllocateHeapNumberStub : public TurboFanCodeStub { DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub); }; -class AllocateMutableHeapNumberStub : public TurboFanCodeStub { - public: - explicit AllocateMutableHeapNumberStub(Isolate* isolate) - : TurboFanCodeStub(isolate) {} - - void 
InitializeDescriptor(CodeStubDescriptor* descriptor) override; - void GenerateAssembly(CodeStubAssembler* assembler) const override; - - DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber); - DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub); -}; - #define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \ class Allocate##Type##Stub : public TurboFanCodeStub { \ public: \ diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc deleted file mode 100644 index bf9d03c25d..0000000000 --- a/src/compiler/change-lowering.cc +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/compiler/change-lowering.h" - -#include "src/compiler/js-graph.h" -#include "src/compiler/linkage.h" -#include "src/compiler/machine-operator.h" -#include "src/compiler/node-properties.h" -#include "src/compiler/simplified-operator.h" - -namespace v8 { -namespace internal { -namespace compiler { - -ChangeLowering::~ChangeLowering() {} - - -Reduction ChangeLowering::Reduce(Node* node) { - switch (node->opcode()) { - case IrOpcode::kLoadField: - return ReduceLoadField(node); - case IrOpcode::kStoreField: - return ReduceStoreField(node); - case IrOpcode::kLoadElement: - return ReduceLoadElement(node); - case IrOpcode::kStoreElement: - return ReduceStoreElement(node); - case IrOpcode::kAllocate: - return ReduceAllocate(node); - default: - return NoChange(); - } - UNREACHABLE(); - return NoChange(); -} - -Reduction ChangeLowering::ReduceLoadField(Node* node) { - const FieldAccess& access = FieldAccessOf(node->op()); - Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); - node->InsertInput(graph()->zone(), 1, offset); - NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); - return Changed(node); -} - -Reduction ChangeLowering::ReduceStoreField(Node* node) { - const FieldAccess& access = FieldAccessOf(node->op()); - Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); - node->InsertInput(graph()->zone(), 1, offset); - NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation( - access.machine_type.representation(), - access.write_barrier_kind))); - return Changed(node); -} - - -Node* ChangeLowering::ComputeIndex(const ElementAccess& access, - Node* const key) { - Node* index = key; - const int element_size_shift = - ElementSizeLog2Of(access.machine_type.representation()); - if (element_size_shift) { - index = graph()->NewNode(machine()->Word32Shl(), index, - jsgraph()->Int32Constant(element_size_shift)); - } - const int fixed_offset = access.header_size - access.tag(); - if (fixed_offset) { - index = graph()->NewNode(machine()->Int32Add(), index, - jsgraph()->Int32Constant(fixed_offset)); - } - if (machine()->Is64()) { - // TODO(turbofan): This is probably only correct for typed arrays, and only - // if the typed arrays are at most 2GiB in size, which happens to match - // exactly our current situation. 
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index); - } - return index; -} - -Reduction ChangeLowering::ReduceLoadElement(Node* node) { - const ElementAccess& access = ElementAccessOf(node->op()); - node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1))); - NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); - return Changed(node); -} - -Reduction ChangeLowering::ReduceStoreElement(Node* node) { - const ElementAccess& access = ElementAccessOf(node->op()); - node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1))); - NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation( - access.machine_type.representation(), - access.write_barrier_kind))); - return Changed(node); -} - -Reduction ChangeLowering::ReduceAllocate(Node* node) { - PretenureFlag const pretenure = OpParameter(node->op()); - - Node* size = node->InputAt(0); - Node* effect = node->InputAt(1); - Node* control = node->InputAt(2); - - if (machine()->Is64()) { - size = graph()->NewNode(machine()->ChangeInt32ToInt64(), size); - } - - Node* top_address = jsgraph()->ExternalConstant( - pretenure == NOT_TENURED - ? ExternalReference::new_space_allocation_top_address(isolate()) - : ExternalReference::old_space_allocation_top_address(isolate())); - Node* limit_address = jsgraph()->ExternalConstant( - pretenure == NOT_TENURED - ? ExternalReference::new_space_allocation_limit_address(isolate()) - : ExternalReference::old_space_allocation_limit_address(isolate())); - - Node* top = effect = - graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address, - jsgraph()->IntPtrConstant(0), effect, control); - Node* limit = effect = - graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address, - jsgraph()->IntPtrConstant(0), effect, control); - - Node* new_top = graph()->NewNode(machine()->IntAdd(), top, size); - - Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit); - Node* branch = - graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); - - Node* if_true = graph()->NewNode(common()->IfTrue(), branch); - Node* etrue = effect; - Node* vtrue; - { - etrue = graph()->NewNode( - machine()->Store(StoreRepresentation( - MachineType::PointerRepresentation(), kNoWriteBarrier)), - top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true); - vtrue = graph()->NewNode( - machine()->BitcastWordToTagged(), - graph()->NewNode(machine()->IntAdd(), top, - jsgraph()->IntPtrConstant(kHeapObjectTag))); - } - - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - Node* efalse = effect; - Node* vfalse; - { - Node* target = pretenure == NOT_TENURED - ? 
jsgraph()->AllocateInNewSpaceStubConstant() - : jsgraph()->AllocateInOldSpaceStubConstant(); - if (!allocate_operator_.is_set()) { - CallDescriptor* descriptor = - Linkage::GetAllocateCallDescriptor(graph()->zone()); - allocate_operator_.set(common()->Call(descriptor)); - } - vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size, - efalse, if_false); - } - - control = graph()->NewNode(common()->Merge(2), if_true, if_false); - effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); - Node* value = graph()->NewNode( - common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control); - - ReplaceWithValue(node, value, effect); - return Replace(value); -} - -Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); } - - -Graph* ChangeLowering::graph() const { return jsgraph()->graph(); } - - -CommonOperatorBuilder* ChangeLowering::common() const { - return jsgraph()->common(); -} - - -MachineOperatorBuilder* ChangeLowering::machine() const { - return jsgraph()->machine(); -} - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h deleted file mode 100644 index 63a17edbef..0000000000 --- a/src/compiler/change-lowering.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_COMPILER_CHANGE_LOWERING_H_ -#define V8_COMPILER_CHANGE_LOWERING_H_ - -#include "src/compiler/graph-reducer.h" - -namespace v8 { -namespace internal { -namespace compiler { - -// Forward declarations. -class CommonOperatorBuilder; -struct ElementAccess; -class JSGraph; -class Linkage; -class MachineOperatorBuilder; -class Operator; - -class ChangeLowering final : public AdvancedReducer { - public: - ChangeLowering(Editor* editor, JSGraph* jsgraph) - : AdvancedReducer(editor), jsgraph_(jsgraph) {} - ~ChangeLowering() final; - - Reduction Reduce(Node* node) final; - - private: - Reduction ReduceLoadField(Node* node); - Reduction ReduceStoreField(Node* node); - Reduction ReduceLoadElement(Node* node); - Reduction ReduceStoreElement(Node* node); - Reduction ReduceAllocate(Node* node); - - Node* ComputeIndex(const ElementAccess& access, Node* const key); - Graph* graph() const; - Isolate* isolate() const; - JSGraph* jsgraph() const { return jsgraph_; } - CommonOperatorBuilder* common() const; - MachineOperatorBuilder* machine() const; - - JSGraph* const jsgraph_; - SetOncePointer allocate_operator_; -}; - -} // namespace compiler -} // namespace internal -} // namespace v8 - -#endif // V8_COMPILER_CHANGE_LOWERING_H_ diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc index 746296935e..7e589ea589 100644 --- a/src/compiler/js-create-lowering.cc +++ b/src/compiler/js-create-lowering.cc @@ -953,9 +953,21 @@ Node* JSCreateLowering::AllocateFastLiteral( site_context->ExitScope(current_site, boilerplate_object); } else if (property_details.representation().IsDouble()) { // Allocate a mutable HeapNumber box and store the value into it. 
- value = effect = AllocateMutableHeapNumber( - Handle::cast(boilerplate_value)->value(), + effect = graph()->NewNode(common()->BeginRegion(), effect); + value = effect = graph()->NewNode( + simplified()->Allocate(NOT_TENURED), + jsgraph()->Constant(HeapNumber::kSize), effect, control); + effect = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForMap()), value, + jsgraph()->HeapConstant(factory()->mutable_heap_number_map()), effect, control); + effect = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), + value, jsgraph()->Constant( + Handle::cast(boilerplate_value)->value()), + effect, control); + value = effect = + graph()->NewNode(common()->FinishRegion(), value, effect); } else if (property_details.representation().IsSmi()) { // Ensure that value is stored as smi. value = boilerplate_value->IsUninitialized() @@ -1076,23 +1088,6 @@ Node* JSCreateLowering::AllocateFastLiteralElements( return builder.Finish(); } -Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect, - Node* control) { - // TODO(turbofan): Support inline allocation of MutableHeapNumber - // (requires proper alignment on Allocate, and Begin/FinishRegion). - Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate()); - CallDescriptor* desc = Linkage::GetStubCallDescriptor( - isolate(), jsgraph()->zone(), callable.descriptor(), 0, - CallDescriptor::kNoFlags, Operator::kNoThrow); - Node* result = effect = graph()->NewNode( - common()->Call(desc), jsgraph()->HeapConstant(callable.code()), - jsgraph()->NoContextConstant(), effect, control); - effect = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result, - jsgraph()->Constant(value), effect, control); - return result; -} - MaybeHandle JSCreateLowering::GetSpecializationLiterals( Node* node) { Node* const closure = NodeProperties::GetValueInput(node, 0); diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h index 99eb461148..57b28af603 100644 --- a/src/compiler/js-create-lowering.h +++ b/src/compiler/js-create-lowering.h @@ -71,7 +71,6 @@ class JSCreateLowering final : public AdvancedReducer { Handle boilerplate, PretenureFlag pretenure, AllocationSiteUsageContext* site_context); - Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control); // Infers the LiteralsArray to use for a given {node}. 
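The hunks above replace the old AllocateMutableHeapNumber stub call with an inline allocation region: BeginRegion, a simplified Allocate(NOT_TENURED) of HeapNumber::kSize, a StoreField of mutable_heap_number_map, a StoreField of the float64 value, then FinishRegion. As a rough illustration only, here is a toy C++ model (not V8 code; the 16-byte size, the 8-byte field offset and the map constant are assumptions of the sketch) of what that folded sequence writes into the heap:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// A toy bump-pointer "new space": top grows towards limit.
struct ToyHeap {
  alignas(8) unsigned char space[4096];
  unsigned char* top = space;
  unsigned char* limit = space + sizeof(space);

  void* Allocate(std::size_t size) {
    if (size > static_cast<std::size_t>(limit - top)) return nullptr;  // slow path elided
    void* result = top;
    top += size;
    return result;
  }
};

int main() {
  ToyHeap heap;
  const uint64_t kMutableHeapNumberMap = 0xABCD;  // stand-in for the real map word

  // Allocate(HeapNumber::kSize): map word plus float64 payload, 16 bytes here.
  unsigned char* box = static_cast<unsigned char*>(heap.Allocate(16));

  // StoreField(ForMap): the store targets the object just allocated, so the
  // optimizer may omit the write barrier.
  std::memcpy(box, &kMutableHeapNumberMap, sizeof(kMutableHeapNumberMap));

  // StoreField(ForHeapNumberValue): the boxed double from the boilerplate.
  double boilerplate_value = 0.5;
  std::memcpy(box + 8, &boilerplate_value, sizeof(boilerplate_value));

  double check;
  std::memcpy(&check, box + 8, sizeof(check));
  std::printf("boxed value: %f\n", check);
  return 0;
}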
MaybeHandle GetSpecializationLiterals(Node* node); diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc index de167226ee..1ce9768031 100644 --- a/src/compiler/js-generic-lowering.cc +++ b/src/compiler/js-generic-lowering.cc @@ -138,14 +138,14 @@ void JSGenericLowering::LowerJSStrictNotEqual(Node* node) { void JSGenericLowering::LowerJSToBoolean(Node* node) { Callable callable = CodeFactory::ToBoolean(isolate()); node->AppendInput(zone(), graph()->start()); - ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags, + ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate, Operator::kEliminatable); } void JSGenericLowering::LowerJSTypeOf(Node* node) { Callable callable = CodeFactory::Typeof(isolate()); node->AppendInput(zone(), graph()->start()); - ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags, + ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate, Operator::kEliminatable); } diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc index 649c208113..374f7b9702 100644 --- a/src/compiler/js-native-context-specialization.cc +++ b/src/compiler/js-native-context-specialization.cc @@ -312,19 +312,21 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( !FLAG_unbox_double_fields) { if (access_info.HasTransitionMap()) { // Allocate a MutableHeapNumber for the new property. - Callable callable = - CodeFactory::AllocateMutableHeapNumber(isolate()); - CallDescriptor* desc = Linkage::GetStubCallDescriptor( - isolate(), jsgraph()->zone(), callable.descriptor(), 0, - CallDescriptor::kNoFlags, Operator::kNoThrow); - Node* this_box = this_effect = graph()->NewNode( - common()->Call(desc), - jsgraph()->HeapConstant(callable.code()), - jsgraph()->NoContextConstant(), this_effect, this_control); + this_effect = + graph()->NewNode(common()->BeginRegion(), this_effect); + Node* this_box = this_effect = + graph()->NewNode(simplified()->Allocate(NOT_TENURED), + jsgraph()->Constant(HeapNumber::kSize), + this_effect, this_control); + this_effect = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForMap()), this_box, + jsgraph()->HeapConstant(factory()->mutable_heap_number_map()), + this_effect, this_control); this_effect = graph()->NewNode( simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), this_box, this_value, this_effect, this_control); - this_value = this_box; + this_value = this_effect = graph()->NewNode( + common()->FinishRegion(), this_box, this_effect); field_access.type = Type::TaggedPointer(); } else { diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h index bf299d39a6..958e8dc949 100644 --- a/src/compiler/linkage.h +++ b/src/compiler/linkage.h @@ -162,7 +162,9 @@ class CallDescriptor final : public ZoneObject { kRestoreJSSP = 1u << 6, kRestoreCSP = 1u << 7, // Causes the code generator to initialize the root register. - kInitializeRootRegister = 1u << 8 + kInitializeRootRegister = 1u << 8, + // Does not ever try to allocate space on our heap. + kNoAllocate = 1u << 9 }; typedef base::Flags Flags; diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc new file mode 100644 index 0000000000..59fd899405 --- /dev/null +++ b/src/compiler/memory-optimizer.cc @@ -0,0 +1,494 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
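The linkage.h hunk above introduces the kNoAllocate CallDescriptor flag, and js-generic-lowering.cc tags the ToBoolean and Typeof stub calls with it. Its consumer is MemoryOptimizer::VisitCall in the new file that begins here: a call without the flag resets the allocation state, a call with it keeps the current group alive. A toy sketch of that rule, with hypothetical types rather than the V8 API:

#include <cstdint>
#include <cstdio>

// Hypothetical flag values; only the meaning of the bit matters here.
enum CallFlag : uint32_t {
  kNoFlags = 0u,
  kNoAllocate = 1u << 9,  // "does not ever try to allocate space on our heap"
};

// Stand-in for the allocation state: true means an allocation group is still
// open and later allocations/stores on this effect path may use it.
bool VisitCall(bool state_is_open, uint32_t call_flags) {
  if ((call_flags & kNoAllocate) == 0) {
    return false;  // callee may allocate and move top: fall back to the empty state
  }
  return state_is_open;  // safe to keep folding across this call
}

int main() {
  bool state = true;
  state = VisitCall(state, kNoAllocate);
  std::printf("after a kNoAllocate call: %s\n", state ? "open" : "empty");
  state = VisitCall(state, kNoFlags);
  std::printf("after an ordinary call:   %s\n", state ? "open" : "empty");
  return 0;
}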
+ +#include "src/compiler/memory-optimizer.h" + +#include "src/compiler/js-graph.h" +#include "src/compiler/linkage.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/node.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone) + : jsgraph_(jsgraph), + empty_state_(AllocationState::Empty(zone)), + pending_(zone), + tokens_(zone), + zone_(zone) {} + +void MemoryOptimizer::Optimize() { + EnqueueUses(graph()->start(), empty_state()); + while (!tokens_.empty()) { + Token const token = tokens_.front(); + tokens_.pop(); + VisitNode(token.node, token.state); + } + DCHECK(pending_.empty()); + DCHECK(tokens_.empty()); +} + +MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node, + PretenureFlag pretenure, + Zone* zone) + : node_ids_(zone), pretenure_(pretenure), size_(nullptr) { + node_ids_.insert(node->id()); +} + +MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node, + PretenureFlag pretenure, + Node* size, Zone* zone) + : node_ids_(zone), pretenure_(pretenure), size_(size) { + node_ids_.insert(node->id()); +} + +void MemoryOptimizer::AllocationGroup::Add(Node* node) { + node_ids_.insert(node->id()); +} + +bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const { + return node_ids_.find(node->id()) != node_ids_.end(); +} + +MemoryOptimizer::AllocationState::AllocationState() + : group_(nullptr), size_(std::numeric_limits::max()), top_(nullptr) {} + +MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group) + : group_(group), size_(std::numeric_limits::max()), top_(nullptr) {} + +MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group, + int size, Node* top) + : group_(group), size_(size), top_(top) {} + +bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const { + return group() && group()->IsNewSpaceAllocation(); +} + +void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { + DCHECK(!node->IsDead()); + DCHECK_LT(0, node->op()->EffectInputCount()); + switch (node->opcode()) { + case IrOpcode::kAllocate: + return VisitAllocate(node, state); + case IrOpcode::kCall: + return VisitCall(node, state); + case IrOpcode::kLoadElement: + return VisitLoadElement(node, state); + case IrOpcode::kLoadField: + return VisitLoadField(node, state); + case IrOpcode::kStoreElement: + return VisitStoreElement(node, state); + case IrOpcode::kStoreField: + return VisitStoreField(node, state); + case IrOpcode::kCheckedLoad: + case IrOpcode::kCheckedStore: + case IrOpcode::kIfException: + case IrOpcode::kLoad: + case IrOpcode::kStore: + return VisitOtherEffect(node, state); + default: + break; + } + DCHECK_EQ(0, node->op()->EffectOutputCount()); +} + +void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) { + DCHECK_EQ(IrOpcode::kAllocate, node->opcode()); + Node* value; + Node* size = node->InputAt(0); + Node* effect = node->InputAt(1); + Node* control = node->InputAt(2); + PretenureFlag pretenure = OpParameter(node->op()); + + // Determine the top/limit addresses. + Node* top_address = jsgraph()->ExternalConstant( + pretenure == NOT_TENURED + ? ExternalReference::new_space_allocation_top_address(isolate()) + : ExternalReference::old_space_allocation_top_address(isolate())); + Node* limit_address = jsgraph()->ExternalConstant( + pretenure == NOT_TENURED + ? 
ExternalReference::new_space_allocation_limit_address(isolate()) + : ExternalReference::old_space_allocation_limit_address(isolate())); + + // Check if we can fold this allocation into a previous allocation represented + // by the incoming {state}. + Int32Matcher m(size); + if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) { + int32_t const object_size = m.Value(); + if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size && + state->group()->pretenure() == pretenure) { + // We can fold this Allocate {node} into the allocation {group} + // represented by the given {state}. Compute the upper bound for + // the new {state}. + int32_t const state_size = state->size() + object_size; + + // Update the reservation check to the actual maximum upper bound. + AllocationGroup* const group = state->group(); + if (OpParameter(group->size()) < state_size) { + NodeProperties::ChangeOp(group->size(), + common()->Int32Constant(state_size)); + } + + // Update the allocation top with the new object allocation. + // TODO(bmeurer): Defer writing back top as much as possible. + Node* top = graph()->NewNode(machine()->IntAdd(), state->top(), + jsgraph()->IntPtrConstant(object_size)); + effect = graph()->NewNode( + machine()->Store(StoreRepresentation( + MachineType::PointerRepresentation(), kNoWriteBarrier)), + top_address, jsgraph()->IntPtrConstant(0), top, effect, control); + + // Compute the effective inner allocated address. + value = graph()->NewNode( + machine()->BitcastWordToTagged(), + graph()->NewNode(machine()->IntAdd(), state->top(), + jsgraph()->IntPtrConstant(kHeapObjectTag))); + + // Extend the allocation {group}. + group->Add(value); + state = AllocationState::Open(group, state_size, top, zone()); + } else { + // Setup a mutable reservation size node; will be patched as we fold + // additional allocations into this new group. + Node* size = graph()->NewNode(common()->Int32Constant(object_size)); + + // Load allocation top and limit. + Node* top = effect = + graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address, + jsgraph()->IntPtrConstant(0), effect, control); + Node* limit = effect = graph()->NewNode( + machine()->Load(MachineType::Pointer()), limit_address, + jsgraph()->IntPtrConstant(0), effect, control); + + // Check if we need to collect garbage before we can start bump pointer + // allocation (always done for folded allocations). + Node* check = graph()->NewNode( + machine()->UintLessThan(), + graph()->NewNode( + machine()->IntAdd(), top, + machine()->Is64() + ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size) + : size), + limit); + Node* branch = + graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* etrue = effect; + Node* vtrue = top; + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* efalse = effect; + Node* vfalse; + { + Node* target = pretenure == NOT_TENURED + ? 
jsgraph()->AllocateInNewSpaceStubConstant() + : jsgraph()->AllocateInOldSpaceStubConstant(); + if (!allocate_operator_.is_set()) { + CallDescriptor* descriptor = + Linkage::GetAllocateCallDescriptor(graph()->zone()); + allocate_operator_.set(common()->Call(descriptor)); + } + vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, + size, efalse, if_false); + vfalse = graph()->NewNode(machine()->IntSub(), vfalse, + jsgraph()->IntPtrConstant(kHeapObjectTag)); + } + + control = graph()->NewNode(common()->Merge(2), if_true, if_false); + effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); + value = graph()->NewNode( + common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse, + control); + + // Compute the new top and write it back. + top = graph()->NewNode(machine()->IntAdd(), value, + jsgraph()->IntPtrConstant(object_size)); + effect = graph()->NewNode( + machine()->Store(StoreRepresentation( + MachineType::PointerRepresentation(), kNoWriteBarrier)), + top_address, jsgraph()->IntPtrConstant(0), top, effect, control); + + // Compute the initial object address. + value = graph()->NewNode( + machine()->BitcastWordToTagged(), + graph()->NewNode(machine()->IntAdd(), value, + jsgraph()->IntPtrConstant(kHeapObjectTag))); + + // Start a new allocation group. + AllocationGroup* group = + new (zone()) AllocationGroup(value, pretenure, size, zone()); + state = AllocationState::Open(group, object_size, top, zone()); + } + } else { + // Load allocation top and limit. + Node* top = effect = + graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address, + jsgraph()->IntPtrConstant(0), effect, control); + Node* limit = effect = + graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address, + jsgraph()->IntPtrConstant(0), effect, control); + + // Compute the new top. + Node* new_top = graph()->NewNode( + machine()->IntAdd(), top, + machine()->Is64() + ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size) + : size); + + // Check if we can do bump pointer allocation here. + Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit); + Node* branch = + graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); + + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* etrue = effect; + Node* vtrue; + { + etrue = graph()->NewNode( + machine()->Store(StoreRepresentation( + MachineType::PointerRepresentation(), kNoWriteBarrier)), + top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true); + vtrue = graph()->NewNode( + machine()->BitcastWordToTagged(), + graph()->NewNode(machine()->IntAdd(), top, + jsgraph()->IntPtrConstant(kHeapObjectTag))); + } + + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* efalse = effect; + Node* vfalse; + { + Node* target = pretenure == NOT_TENURED + ? jsgraph()->AllocateInNewSpaceStubConstant() + : jsgraph()->AllocateInOldSpaceStubConstant(); + if (!allocate_operator_.is_set()) { + CallDescriptor* descriptor = + Linkage::GetAllocateCallDescriptor(graph()->zone()); + allocate_operator_.set(common()->Call(descriptor)); + } + vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size, + efalse, if_false); + } + + control = graph()->NewNode(common()->Merge(2), if_true, if_false); + effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); + value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue, vfalse, control); + + // Create an unfoldable allocation group. 
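The folding branch above patches the group's reservation constant and bumps the cached top instead of emitting a second top/limit check, as long as the combined size stays below Page::kMaxRegularHeapObjectSize and the pretenuring matches. A toy model of the effect (plain C++, not V8 code): one reservation check covers several constant-size objects, each of which is then just a pointer bump.

#include <cstddef>
#include <cstdio>

struct ToySpace {
  char backing[1024];
  char* top = backing;
  char* limit = backing + sizeof(backing);
};

// One limit check reserves room for the whole folded group...
bool ReserveGroup(ToySpace* space, std::size_t group_size) {
  return group_size <= static_cast<std::size_t>(space->limit - space->top);
}

// ...after which every allocation folded into the group is a plain bump.
char* BumpAllocate(ToySpace* space, std::size_t object_size) {
  char* result = space->top;
  space->top += object_size;
  return result;
}

int main() {
  ToySpace space;
  const std::size_t kSizeA = 16, kSizeB = 32;  // both known at compile time
  if (ReserveGroup(&space, kSizeA + kSizeB)) {  // single top/limit check
    char* a = BumpAllocate(&space, kSizeA);
    char* b = BumpAllocate(&space, kSizeB);
    std::printf("a at offset %td, b at offset %td\n", a - space.backing,
                b - space.backing);
  } else {
    // A real implementation would call the allocation stub / runtime here.
  }
  return 0;
}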
+ AllocationGroup* group = + new (zone()) AllocationGroup(value, pretenure, zone()); + state = AllocationState::Closed(group, zone()); + } + + // Replace all effect uses of {node} with the {effect}, enqueue the + // effect uses for further processing, and replace all value uses of + // {node} with the {value}. + for (Edge edge : node->use_edges()) { + if (NodeProperties::IsEffectEdge(edge)) { + EnqueueUse(edge.from(), edge.index(), state); + edge.UpdateTo(effect); + } else { + DCHECK(NodeProperties::IsValueEdge(edge)); + edge.UpdateTo(value); + } + } + + // Kill the {node} to make sure we don't leave dangling dead uses. + node->Kill(); +} + +void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) { + DCHECK_EQ(IrOpcode::kCall, node->opcode()); + // If the call can allocate, we start with a fresh state. + if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) { + state = empty_state(); + } + EnqueueUses(node, state); +} + +void MemoryOptimizer::VisitLoadElement(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kLoadElement, node->opcode()); + ElementAccess const& access = ElementAccessOf(node->op()); + Node* index = node->InputAt(1); + node->ReplaceInput(1, ComputeIndex(access, index)); + NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); + EnqueueUses(node, state); +} + +void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) { + DCHECK_EQ(IrOpcode::kLoadField, node->opcode()); + FieldAccess const& access = FieldAccessOf(node->op()); + Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); + node->InsertInput(graph()->zone(), 1, offset); + NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); + EnqueueUses(node, state); +} + +void MemoryOptimizer::VisitStoreElement(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kStoreElement, node->opcode()); + ElementAccess const& access = ElementAccessOf(node->op()); + Node* object = node->InputAt(0); + Node* index = node->InputAt(1); + WriteBarrierKind write_barrier_kind = + ComputeWriteBarrierKind(object, state, access.write_barrier_kind); + node->ReplaceInput(1, ComputeIndex(access, index)); + NodeProperties::ChangeOp( + node, machine()->Store(StoreRepresentation( + access.machine_type.representation(), write_barrier_kind))); + EnqueueUses(node, state); +} + +void MemoryOptimizer::VisitStoreField(Node* node, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kStoreField, node->opcode()); + FieldAccess const& access = FieldAccessOf(node->op()); + Node* object = node->InputAt(0); + WriteBarrierKind write_barrier_kind = + ComputeWriteBarrierKind(object, state, access.write_barrier_kind); + Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); + node->InsertInput(graph()->zone(), 1, offset); + NodeProperties::ChangeOp( + node, machine()->Store(StoreRepresentation( + access.machine_type.representation(), write_barrier_kind))); + EnqueueUses(node, state); +} + +void MemoryOptimizer::VisitOtherEffect(Node* node, + AllocationState const* state) { + EnqueueUses(node, state); +} + +Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) { + Node* index = key; + int element_size_shift = + ElementSizeLog2Of(access.machine_type.representation()); + if (element_size_shift) { + index = graph()->NewNode(machine()->Word32Shl(), index, + jsgraph()->Int32Constant(element_size_shift)); + } + const int fixed_offset = access.header_size - access.tag(); + if (fixed_offset) { + 
index = graph()->NewNode(machine()->Int32Add(), index, + jsgraph()->Int32Constant(fixed_offset)); + } + if (machine()->Is64()) { + // TODO(turbofan): This is probably only correct for typed arrays, and only + // if the typed arrays are at most 2GiB in size, which happens to match + // exactly our current situation. + index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index); + } + return index; +} + +WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind( + Node* object, AllocationState const* state, + WriteBarrierKind write_barrier_kind) { + if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) { + write_barrier_kind = kNoWriteBarrier; + } + return write_barrier_kind; +} + +MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates( + AllocationStates const& states) { + // Check if all states are the same; or at least if all allocation + // states belong to the same allocation group. + AllocationState const* state = states.front(); + AllocationGroup* group = state->group(); + for (size_t i = 1; i < states.size(); ++i) { + if (states[i] != state) state = nullptr; + if (states[i]->group() != group) group = nullptr; + } + if (state == nullptr) { + if (group != nullptr) { + // We cannot fold any more allocations into this group, but we can still + // eliminate write barriers on stores to this group. + // TODO(bmeurer): We could potentially just create a Phi here to merge + // the various tops; but we need to pay special attention not to create + // an unschedulable graph. + state = AllocationState::Closed(group, zone()); + } else { + // The states are from different allocation groups. + state = empty_state(); + } + } + return state; +} + +void MemoryOptimizer::EnqueueMerge(Node* node, int index, + AllocationState const* state) { + DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode()); + int const input_count = node->InputCount() - 1; + DCHECK_LT(0, input_count); + Node* const control = node->InputAt(input_count); + if (control->opcode() == IrOpcode::kLoop) { + // For loops we always start with an empty state at the beginning. + if (index == 0) EnqueueUses(node, empty_state()); + } else { + DCHECK_EQ(IrOpcode::kMerge, control->opcode()); + // Check if we already know about this pending merge. + NodeId const id = node->id(); + auto it = pending_.find(id); + if (it == pending_.end()) { + // Insert a new pending merge. + it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first; + } + // Add the next input state. + it->second.push_back(state); + // Check if states for all inputs are available by now. + if (it->second.size() == static_cast(input_count)) { + // All inputs to this effect merge are done, merge the states given all + // input constraints, drop the pending merge and enqueue uses of the + // EffectPhi {node}. + state = MergeStates(it->second); + EnqueueUses(node, state); + pending_.erase(it); + } + } +} + +void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) { + for (Edge const edge : node->use_edges()) { + if (NodeProperties::IsEffectEdge(edge)) { + EnqueueUse(edge.from(), edge.index(), state); + } + } +} + +void MemoryOptimizer::EnqueueUse(Node* node, int index, + AllocationState const* state) { + if (node->opcode() == IrOpcode::kEffectPhi) { + // An EffectPhi represents a merge of different effect chains, which + // needs special handling depending on whether the merge is part of a + // loop or just a normal control join. 
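ComputeWriteBarrierKind above is where the promised write barrier elimination happens: a store into an object that belongs to the current new-space allocation group needs no barrier, because the object was allocated on this very effect path. A toy sketch of that check, with hypothetical types rather than V8 code:

#include <cstdio>
#include <set>

enum ToyWriteBarrierKind { kNoWriteBarrier = 0, kFullWriteBarrier = 1 };

struct ToyAllocationGroup {
  std::set<int> node_ids;    // objects folded into this group
  bool is_new_space = true;  // pretenure() == NOT_TENURED
};

ToyWriteBarrierKind ComputeWriteBarrierKind(int object_id,
                                            const ToyAllocationGroup* group,
                                            ToyWriteBarrierKind requested) {
  // Storing into an object we allocated ourselves in new space on this path
  // cannot introduce a pointer the GC has to be told about via a barrier.
  if (group != nullptr && group->is_new_space &&
      group->node_ids.count(object_id) != 0) {
    return kNoWriteBarrier;
  }
  return requested;
}

int main() {
  ToyAllocationGroup group;
  group.node_ids.insert(42);
  std::printf("store into freshly allocated object: %d\n",
              ComputeWriteBarrierKind(42, &group, kFullWriteBarrier));
  std::printf("store into some other object:        %d\n",
              ComputeWriteBarrierKind(7, &group, kFullWriteBarrier));
  return 0;
}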
+    EnqueueMerge(node, index, state);
+  } else {
+    Token token = {node, state};
+    tokens_.push(token);
+  }
+}
+
+Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
+
+Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* MemoryOptimizer::common() const {
+  return jsgraph()->common();
+}
+
+MachineOperatorBuilder* MemoryOptimizer::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/memory-optimizer.h b/src/compiler/memory-optimizer.h
new file mode 100644
index 0000000000..f0cd546860
--- /dev/null
+++ b/src/compiler/memory-optimizer.h
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
+#define V8_COMPILER_MEMORY_OPTIMIZER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ElementAccess;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+
+// NodeIds are identifying numbers for nodes that can be used to index
+// auxiliary out-of-line data associated with each node.
+typedef uint32_t NodeId;
+
+// Lowers all simplified memory access and allocation related nodes (i.e.
+// Allocate, LoadField, StoreField and friends) to machine operators.
+// Performs allocation folding and store write barrier elimination
+// implicitly.
+class MemoryOptimizer final {
+ public:
+  MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
+  ~MemoryOptimizer() {}
+
+  void Optimize();
+
+ private:
+  // An allocation group represents a set of allocations that have been folded
+  // together.
+  class AllocationGroup final : public ZoneObject {
+   public:
+    AllocationGroup(Node* node, PretenureFlag pretenure, Zone* zone);
+    AllocationGroup(Node* node, PretenureFlag pretenure, Node* size,
+                    Zone* zone);
+    ~AllocationGroup() {}
+
+    void Add(Node* object);
+    bool Contains(Node* object) const;
+    bool IsNewSpaceAllocation() const { return pretenure() == NOT_TENURED; }
+
+    PretenureFlag pretenure() const { return pretenure_; }
+    Node* size() const { return size_; }
+
+   private:
+    ZoneSet<NodeId> node_ids_;
+    PretenureFlag const pretenure_;
+    Node* const size_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
+  };
+
+  // An allocation state is propagated on the effect paths through the graph.
+  class AllocationState final : public ZoneObject {
+   public:
+    static AllocationState const* Empty(Zone* zone) {
+      return new (zone) AllocationState();
+    }
+    static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
+      return new (zone) AllocationState(group);
+    }
+    static AllocationState const* Open(AllocationGroup* group, int size,
+                                       Node* top, Zone* zone) {
+      return new (zone) AllocationState(group, size, top);
+    }
+
+    bool IsNewSpaceAllocation() const;
+
+    AllocationGroup* group() const { return group_; }
+    Node* top() const { return top_; }
+    int size() const { return size_; }
+
+   private:
+    AllocationState();
+    explicit AllocationState(AllocationGroup* group);
+    AllocationState(AllocationGroup* group, int size, Node* top);
+
+    AllocationGroup* const group_;
+    // The upper bound of the combined allocated object size on the current path
+    // (max int if allocation folding is impossible on this path).
+ int const size_; + Node* const top_; + + DISALLOW_COPY_AND_ASSIGN(AllocationState); + }; + + // An array of allocation states used to collect states on merges. + typedef ZoneVector AllocationStates; + + // We thread through tokens to represent the current state on a given effect + // path through the graph. + struct Token { + Node* node; + AllocationState const* state; + }; + + void VisitNode(Node*, AllocationState const*); + void VisitAllocate(Node*, AllocationState const*); + void VisitCall(Node*, AllocationState const*); + void VisitLoadElement(Node*, AllocationState const*); + void VisitLoadField(Node*, AllocationState const*); + void VisitStoreElement(Node*, AllocationState const*); + void VisitStoreField(Node*, AllocationState const*); + void VisitOtherEffect(Node*, AllocationState const*); + + Node* ComputeIndex(ElementAccess const&, Node*); + WriteBarrierKind ComputeWriteBarrierKind(Node* object, + AllocationState const* state, + WriteBarrierKind); + + AllocationState const* MergeStates(AllocationStates const& states); + + void EnqueueMerge(Node*, int, AllocationState const*); + void EnqueueUses(Node*, AllocationState const*); + void EnqueueUse(Node*, int, AllocationState const*); + + AllocationState const* empty_state() const { return empty_state_; } + Graph* graph() const; + Isolate* isolate() const; + JSGraph* jsgraph() const { return jsgraph_; } + CommonOperatorBuilder* common() const; + MachineOperatorBuilder* machine() const; + Zone* zone() const { return zone_; } + + SetOncePointer allocate_operator_; + JSGraph* const jsgraph_; + AllocationState const* const empty_state_; + ZoneMap pending_; + ZoneQueue tokens_; + Zone* const zone_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_MEMORY_OPTIMIZER_H_ diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index b292c7ce2a..1741916ab0 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -14,7 +14,6 @@ #include "src/compiler/basic-block-instrumentor.h" #include "src/compiler/branch-elimination.h" #include "src/compiler/bytecode-graph-builder.h" -#include "src/compiler/change-lowering.h" #include "src/compiler/code-generator.h" #include "src/compiler/common-operator-reducer.h" #include "src/compiler/control-flow-optimizer.h" @@ -46,6 +45,7 @@ #include "src/compiler/loop-analysis.h" #include "src/compiler/loop-peeling.h" #include "src/compiler/machine-operator-reducer.h" +#include "src/compiler/memory-optimizer.h" #include "src/compiler/move-optimizer.h" #include "src/compiler/osr.h" #include "src/compiler/pipeline-statistics.h" @@ -1019,6 +1019,15 @@ struct EffectControlLinearizationPhase { } }; +struct MemoryOptimizationPhase { + static const char* phase_name() { return "memory optimization"; } + + void Run(PipelineData* data, Zone* temp_zone) { + MemoryOptimizer optimizer(data->jsgraph(), temp_zone); + optimizer.Optimize(); + } +}; + struct LateOptimizationPhase { static const char* phase_name() { return "late optimization"; } @@ -1027,7 +1036,6 @@ struct LateOptimizationPhase { DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common()); ValueNumberingReducer value_numbering(temp_zone); - ChangeLowering lowering(&graph_reducer, data->jsgraph()); MachineOperatorReducer machine_reducer(data->jsgraph()); CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), data->common(), data->machine()); @@ -1036,7 +1044,6 @@ struct LateOptimizationPhase { 
TailCallOptimization tco(data->common(), data->graph()); AddReducer(data, &graph_reducer, &dead_code_elimination); AddReducer(data, &graph_reducer, &value_numbering); - AddReducer(data, &graph_reducer, &lowering); AddReducer(data, &graph_reducer, &machine_reducer); AddReducer(data, &graph_reducer, &common_reducer); AddReducer(data, &graph_reducer, &select_lowering); @@ -1458,6 +1465,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { RunPrintAndVerify("Control flow optimized", true); } + // Optimize memory access and allocation operations. + Run(); + // TODO(jarin, rossberg): Remove UNTYPED once machine typing works. + RunPrintAndVerify("Memory optimized", true); + // Lower changes that have been inserted before. Run(); // TODO(jarin, rossberg): Remove UNTYPED once machine typing works. diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc index 611a298460..2a54b5a9ca 100644 --- a/src/heap/spaces.cc +++ b/src/heap/spaces.cc @@ -2473,7 +2473,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { // Keep the linear allocation area empty if requested to do so, just // return area back to the free list instead. owner_->Free(new_node->address() + size_in_bytes, bytes_left); - DCHECK(owner_->top() == NULL && owner_->limit() == NULL); + owner_->SetTopAndLimit(new_node->address() + size_in_bytes, + new_node->address() + size_in_bytes); } else if (bytes_left > kThreshold && owner_->heap()->incremental_marking()->IsMarkingIncomplete() && FLAG_incremental_marking) { @@ -2485,7 +2486,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { new_node_size - size_in_bytes - linear_size); owner_->SetTopAndLimit(new_node->address() + size_in_bytes, new_node->address() + size_in_bytes + linear_size); - } else if (bytes_left > 0) { + } else if (bytes_left >= 0) { // Normally we give the rest of the node to the allocator as its new // linear allocation area. 
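The spaces.cc hunks above now keep the linear allocation area valid but empty (top == limit, and a zero-byte remainder also calls SetTopAndLimit) instead of leaving top and limit as NULL, presumably because the inlined fast path generated by the MemoryOptimizer reads the top and limit cells unconditionally. A toy sketch (assumed names, not the V8 heap API) of why an empty area is safe where a null one is not:

#include <cstddef>
#include <cstdio>

struct LinearAllocationArea {
  char* top;
  char* limit;
};

// Mirrors the inlined fast path: compare the remaining room against the
// request, bump on success, otherwise report failure so the caller takes the
// slow (runtime) path.
char* TryBumpAllocate(LinearAllocationArea* area, std::size_t size) {
  if (size > static_cast<std::size_t>(area->limit - area->top)) return nullptr;
  char* result = area->top;
  area->top += size;
  return result;
}

int main() {
  char backing[64];
  // top == limit: a valid but empty area. Every inline attempt fails cleanly
  // and falls through to the slow path, instead of touching null pointers.
  LinearAllocationArea empty{backing + sizeof(backing), backing + sizeof(backing)};
  std::printf("allocate from empty area: %p\n",
              static_cast<void*>(TryBumpAllocate(&empty, 16)));
  return 0;
}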
owner_->SetTopAndLimit(new_node->address() + size_in_bytes, diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc index 31f56461ee..a984adec47 100644 --- a/src/ia32/builtins-ia32.cc +++ b/src/ia32/builtins-ia32.cc @@ -2624,11 +2624,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- edx : requested object size (untagged) // -- esp[0] : return address // ----------------------------------- - Label runtime; - __ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(edx); __ PopReturnAddressTo(ecx); __ Push(edx); @@ -2643,11 +2638,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- edx : requested object size (untagged) // -- esp[0] : return address // ----------------------------------- - Label runtime; - __ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(edx); __ PopReturnAddressTo(ecx); __ Push(edx); diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc index 586701fdfa..c0a342c87e 100644 --- a/src/interface-descriptors.cc +++ b/src/interface-descriptors.cc @@ -75,12 +75,6 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const { } -void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - data->InitializePlatformSpecific(0, nullptr, nullptr); -} - - void VoidDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { data->InitializePlatformSpecific(0, nullptr); diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h index c44297c968..4159ed455b 100644 --- a/src/interface-descriptors.h +++ b/src/interface-descriptors.h @@ -47,7 +47,6 @@ class PlatformInterfaceDescriptor; V(RegExpConstructResult) \ V(TransitionElementsKind) \ V(AllocateHeapNumber) \ - V(AllocateMutableHeapNumber) \ V(AllocateFloat32x4) \ V(AllocateInt32x4) \ V(AllocateUint32x4) \ @@ -582,12 +581,6 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor { SIMD128_TYPES(SIMD128_ALLOC_DESC) #undef SIMD128_ALLOC_DESC -class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor { - public: - DECLARE_DESCRIPTOR(AllocateMutableHeapNumberDescriptor, - CallInterfaceDescriptor) -}; - class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor { public: DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE( diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc index 024bd83570..89fcef863f 100644 --- a/src/mips/builtins-mips.cc +++ b/src/mips/builtins-mips.cc @@ -2742,11 +2742,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- a0 : requested object size (untagged) // -- ra : return address // ----------------------------------- - Label runtime; - __ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(a0); __ Push(a0); __ Move(cp, Smi::FromInt(0)); @@ -2759,11 +2754,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- a0 : requested object size (untagged) // -- ra : return address // ----------------------------------- - Label runtime; - __ Allocate(a0, v0, a1, a2, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(a0); __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE))); __ Push(a0, a1); diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc index 57938144da..1cb20abea5 100644 --- a/src/mips64/builtins-mips64.cc +++ b/src/mips64/builtins-mips64.cc @@ 
-2730,11 +2730,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- a0 : requested object size (untagged) // -- ra : return address // ----------------------------------- - Label runtime; - __ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(a0); __ Push(a0); __ Move(cp, Smi::FromInt(0)); @@ -2747,11 +2742,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- a0 : requested object size (untagged) // -- ra : return address // ----------------------------------- - Label runtime; - __ Allocate(a0, v0, a1, a2, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(a0); __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE))); __ Push(a0, a1); diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc index 3c8ace7d54..6dde86cf6d 100644 --- a/src/ppc/builtins-ppc.cc +++ b/src/ppc/builtins-ppc.cc @@ -2740,11 +2740,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- r4 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(r4, r3, r5, r6, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(r4); __ Push(r4); __ LoadSmiLiteral(cp, Smi::FromInt(0)); @@ -2757,11 +2752,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- r4 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(r4, r3, r5, r6, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(r4); __ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE))); __ Push(r4, r5); diff --git a/src/s390/builtins-s390.cc b/src/s390/builtins-s390.cc index 2d521db65e..62c9da221d 100644 --- a/src/s390/builtins-s390.cc +++ b/src/s390/builtins-s390.cc @@ -2697,11 +2697,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- r3 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(r3, r2, r4, r5, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(r3); __ Push(r3); __ LoadSmiLiteral(cp, Smi::FromInt(0)); @@ -2714,11 +2709,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- r3 : requested object size (untagged) // -- lr : return address // ----------------------------------- - Label runtime; - __ Allocate(r3, r2, r4, r5, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(r3); __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE))); __ Push(r3, r4); diff --git a/src/v8.gyp b/src/v8.gyp index 0a41a70050..25e678080b 100644 --- a/src/v8.gyp +++ b/src/v8.gyp @@ -502,8 +502,6 @@ 'compiler/bytecode-branch-analysis.h', 'compiler/bytecode-graph-builder.cc', 'compiler/bytecode-graph-builder.h', - 'compiler/change-lowering.cc', - 'compiler/change-lowering.h', 'compiler/c-linkage.cc', 'compiler/coalesced-live-ranges.cc', 'compiler/coalesced-live-ranges.h', @@ -609,6 +607,8 @@ 'compiler/machine-operator-reducer.h', 'compiler/machine-operator.cc', 'compiler/machine-operator.h', + 'compiler/memory-optimizer.cc', + 'compiler/memory-optimizer.h', 'compiler/move-optimizer.cc', 'compiler/move-optimizer.h', 'compiler/node-aux-data.h', diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index a14fdf5b18..dc8cd74956 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -2048,11 +2048,6 @@ void 
Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- rdx : requested object size (untagged) // -- rsp[0] : return address // ----------------------------------- - Label runtime; - __ Allocate(rdx, rax, rcx, rdi, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ Integer32ToSmi(rdx, rdx); __ PopReturnAddressTo(rcx); __ Push(rdx); @@ -2067,11 +2062,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- rdx : requested object size (untagged) // -- rsp[0] : return address // ----------------------------------- - Label runtime; - __ Allocate(rdx, rax, rcx, rdi, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ Integer32ToSmi(rdx, rdx); __ PopReturnAddressTo(rcx); __ Push(rdx); diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc index 319f99a54f..a7f4edacc8 100644 --- a/src/x87/builtins-x87.cc +++ b/src/x87/builtins-x87.cc @@ -2654,11 +2654,6 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) { // -- edx : requested object size (untagged) // -- esp[0] : return address // ----------------------------------- - Label runtime; - __ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS); - __ Ret(); - - __ bind(&runtime); __ SmiTag(edx); __ PopReturnAddressTo(ecx); __ Push(edx); @@ -2673,11 +2668,6 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) { // -- edx : requested object size (untagged) // -- esp[0] : return address // ----------------------------------- - Label runtime; - __ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE); - __ Ret(); - - __ bind(&runtime); __ SmiTag(edx); __ PopReturnAddressTo(ecx); __ Push(edx); diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc index ad92009ff8..4efb1494fd 100644 --- a/test/cctest/compiler/test-simplified-lowering.cc +++ b/test/cctest/compiler/test-simplified-lowering.cc @@ -6,11 +6,10 @@ #include "src/ast/scopes.h" #include "src/compiler/access-builder.h" -#include "src/compiler/change-lowering.h" #include "src/compiler/control-builders.h" #include "src/compiler/effect-control-linearizer.h" -#include "src/compiler/graph-reducer.h" #include "src/compiler/graph-visualizer.h" +#include "src/compiler/memory-optimizer.h" #include "src/compiler/node-properties.h" #include "src/compiler/pipeline.h" #include "src/compiler/representation-change.h" @@ -67,11 +66,8 @@ class SimplifiedLoweringTester : public GraphBuilderTester { EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone()); linearizer.Run(); - GraphReducer reducer(this->zone(), this->graph()); - ChangeLowering lowering(&reducer, &jsgraph); - reducer.AddReducer(&lowering); - reducer.ReduceGraph(); - Verifier::Run(this->graph()); + MemoryOptimizer memory_optimizer(&jsgraph, this->zone()); + memory_optimizer.Optimize(); } void CheckNumberCall(double expected, double input) { @@ -753,11 +749,8 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders { EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone()); linearizer.Run(); - GraphReducer reducer(this->zone(), this->graph()); - ChangeLowering lowering(&reducer, &jsgraph); - reducer.AddReducer(&lowering); - reducer.ReduceGraph(); - Verifier::Run(this->graph()); + MemoryOptimizer memory_optimizer(&jsgraph, this->zone()); + memory_optimizer.Optimize(); } // Inserts the node as the return value of the graph. 
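As the test changes above show, the lowering is no longer a reducer registered with GraphReducer; the tests simply construct a MemoryOptimizer and call Optimize(), which walks the effect edges once from the start node, carrying the per-path allocation state in a queue of (node, state) tokens. A toy model of that driving loop, with simplified types rather than the V8 API:

#include <cstdio>
#include <queue>
#include <vector>

struct ToyNode {
  int id;
  std::vector<ToyNode*> effect_uses;  // nodes consuming this node's effect output
};

struct Token {
  ToyNode* node;
  int state;  // stand-in for AllocationState const*
};

void Optimize(ToyNode* start) {
  std::queue<Token> tokens;
  tokens.push({start, /*empty state*/ 0});
  while (!tokens.empty()) {
    Token token = tokens.front();
    tokens.pop();
    std::printf("visit node %d with state %d\n", token.node->id, token.state);
    int next_state = token.state + 1;  // visiting a node may produce a new state
    for (ToyNode* use : token.node->effect_uses) {
      tokens.push({use, next_state});  // EnqueueUses
    }
  }
}

int main() {
  ToyNode c{2, {}};
  ToyNode b{1, {&c}};
  ToyNode a{0, {&b}};
  Optimize(&a);
  return 0;
}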
diff --git a/test/unittests/compiler/change-lowering-unittest.cc b/test/unittests/compiler/change-lowering-unittest.cc
deleted file mode 100644
index 5144356140..0000000000
--- a/test/unittests/compiler/change-lowering-unittest.cc
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/code-stubs.h"
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "test/unittests/compiler/compiler-test-utils.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-#include "testing/gmock-support.h"
-
-using testing::_;
-using testing::AllOf;
-using testing::BitEq;
-using testing::Capture;
-using testing::CaptureEq;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class ChangeLoweringTest : public TypedGraphTest {
- public:
-  ChangeLoweringTest() : simplified_(zone()) {}
-
-  virtual MachineRepresentation WordRepresentation() const = 0;
-
- protected:
-  bool Is32() const {
-    return WordRepresentation() == MachineRepresentation::kWord32;
-  }
-  bool Is64() const {
-    return WordRepresentation() == MachineRepresentation::kWord64;
-  }
-
-  Reduction Reduce(Node* node) {
-    GraphReducer graph_reducer(zone(), graph());
-    MachineOperatorBuilder machine(zone(), WordRepresentation());
-    JSOperatorBuilder javascript(zone());
-    JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
-                    &machine);
-    ChangeLowering reducer(&graph_reducer, &jsgraph);
-    return reducer.Reduce(node);
-  }
-
-  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
-  Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
-                                      const Matcher<Node*>& control_matcher) {
-    return IsCall(
-        _, IsHeapConstant(AllocateHeapNumberStub(isolate()).GetCode()),
-        IsNumberConstant(BitEq(0.0)), effect_matcher, control_matcher);
-  }
-  Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
-    return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
-                                IsSmiShiftBitsConstant())
-                  : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
-  }
-  Matcher<Node*> IsChangeSmiToInt32(const Matcher<Node*>& value_matcher) {
-    return Is64() ? IsTruncateInt64ToInt32(
-                        IsWord64Sar(value_matcher, IsSmiShiftBitsConstant()))
-                  : IsWord32Sar(value_matcher, IsSmiShiftBitsConstant());
-  }
-  Matcher<Node*> IsChangeUint32ToSmi(const Matcher<Node*>& value_matcher) {
-    return Is64() ? IsWord64Shl(IsChangeUint32ToUint64(value_matcher),
-                                IsSmiShiftBitsConstant())
-                  : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
-  }
-  Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
-                                  const Matcher<Node*>& control_matcher) {
-    return IsLoad(MachineType::Float64(), value_matcher,
-                  IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
-                  graph()->start(), control_matcher);
-  }
-  Matcher<Node*> IsIntPtrConstant(int value) {
-    return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
-  }
-  Matcher<Node*> IsSmiShiftBitsConstant() {
-    return IsIntPtrConstant(kSmiShiftSize + kSmiTagSize);
-  }
-  Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
-                             const Matcher<Node*>& rhs_matcher) {
-    return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
-                  : IsWord64Equal(lhs_matcher, rhs_matcher);
-  }
-
- private:
-  SimplifiedOperatorBuilder simplified_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Common.
-
-
-class ChangeLoweringCommonTest
-    : public ChangeLoweringTest,
-      public ::testing::WithParamInterface<MachineRepresentation> {
- public:
-  ~ChangeLoweringCommonTest() override {}
-
-  MachineRepresentation WordRepresentation() const final { return GetParam(); }
-};
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldSmi) {
-  FieldAccess access = {
-      kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
-      Type::Any(), MachineType::AnyTagged(),    kNoWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* p1 = Parameter(Type::TaggedSigned());
-  Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
-                                 graph()->start(), graph()->start());
-  Reduction r = Reduce(store);
-
-  ASSERT_TRUE(r.Changed());
-  EXPECT_THAT(r.replacement(),
-              IsStore(StoreRepresentation(MachineRepresentation::kTagged,
-                                          kNoWriteBarrier),
-                      p0, IsIntPtrConstant(access.offset - access.tag()), p1,
-                      graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldTagged) {
-  FieldAccess access = {
-      kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
-      Type::Any(), MachineType::AnyTagged(),    kFullWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* p1 = Parameter(Type::Tagged());
-  Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
-                                 graph()->start(), graph()->start());
-  Reduction r = Reduce(store);
-
-  ASSERT_TRUE(r.Changed());
-  EXPECT_THAT(r.replacement(),
-              IsStore(StoreRepresentation(MachineRepresentation::kTagged,
-                                          kFullWriteBarrier),
-                      p0, IsIntPtrConstant(access.offset - access.tag()), p1,
-                      graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadField) {
-  FieldAccess access = {
-      kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
-      Type::Any(), MachineType::AnyTagged(),    kNoWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* load = graph()->NewNode(simplified()->LoadField(access), p0,
-                                graph()->start(), graph()->start());
-  Reduction r = Reduce(load);
-
-  ASSERT_TRUE(r.Changed());
-  Matcher<Node*> index_match = IsIntPtrConstant(access.offset - access.tag());
-  EXPECT_THAT(r.replacement(),
-              IsLoad(MachineType::AnyTagged(), p0,
-                     IsIntPtrConstant(access.offset - access.tag()),
-                     graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementTagged) {
-  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
-                          MachineType::AnyTagged(), kFullWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* p1 = Parameter(Type::Signed32());
-  Node* p2 = Parameter(Type::Tagged());
-  Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
-                                 graph()->start(), graph()->start());
-  Reduction r = Reduce(store);
-
-  const int element_size_shift =
-      ElementSizeLog2Of(access.machine_type.representation());
-  ASSERT_TRUE(r.Changed());
-  Matcher<Node*> index_match =
-      IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
-                 IsInt32Constant(access.header_size - access.tag()));
-  if (!Is32()) {
-    index_match = IsChangeUint32ToUint64(index_match);
-  }
-
-  EXPECT_THAT(r.replacement(),
-              IsStore(StoreRepresentation(MachineRepresentation::kTagged,
-                                          kFullWriteBarrier),
-                      p0, index_match, p2, graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementUint8) {
-  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
-                          Type::Signed32(), MachineType::Uint8(),
-                          kNoWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* p1 = Parameter(Type::Signed32());
-  Node* p2 = Parameter(Type::Signed32());
-  Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
-                                 graph()->start(), graph()->start());
-  Reduction r = Reduce(store);
-
-  ASSERT_TRUE(r.Changed());
-  Matcher<Node*> index_match =
-      IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
-  if (!Is32()) {
-    index_match = IsChangeUint32ToUint64(index_match);
-  }
-
-  EXPECT_THAT(r.replacement(),
-              IsStore(StoreRepresentation(MachineRepresentation::kWord8,
-                                          kNoWriteBarrier),
-                      p0, index_match, p2, graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementTagged) {
-  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
-                          MachineType::AnyTagged(), kNoWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* p1 = Parameter(Type::Signed32());
-  Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
-                                graph()->start(), graph()->start());
-  Reduction r = Reduce(load);
-
-  const int element_size_shift =
-      ElementSizeLog2Of(access.machine_type.representation());
-  ASSERT_TRUE(r.Changed());
-  Matcher<Node*> index_match =
-      IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
-                 IsInt32Constant(access.header_size - access.tag()));
-  if (!Is32()) {
-    index_match = IsChangeUint32ToUint64(index_match);
-  }
-
-  EXPECT_THAT(r.replacement(), IsLoad(MachineType::AnyTagged(), p0, index_match,
-                                      graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementInt8) {
-  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
-                          Type::Signed32(), MachineType::Int8(),
-                          kNoWriteBarrier};
-  Node* p0 = Parameter(Type::TaggedPointer());
-  Node* p1 = Parameter(Type::Signed32());
-  Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
-                                graph()->start(), graph()->start());
-  Reduction r = Reduce(load);
-
-  ASSERT_TRUE(r.Changed());
-  Matcher<Node*> index_match =
-      IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
-  if (!Is32()) {
-    index_match = IsChangeUint32ToUint64(index_match);
-  }
-
-  EXPECT_THAT(r.replacement(), IsLoad(MachineType::Int8(), p0, index_match,
-                                      graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, Allocate) {
-  Node* p0 = Parameter(Type::Signed32());
-  Node* alloc = graph()->NewNode(simplified()->Allocate(TENURED), p0,
-                                 graph()->start(), graph()->start());
-  Reduction r = Reduce(alloc);
-
-  // Only check that we lowered, but do not specify the exact form since
-  // this is subject to change.
-  ASSERT_TRUE(r.Changed());
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/test/unittests/unittests.gyp b/test/unittests/unittests.gyp
index 823edc371f..a873121c17 100644
--- a/test/unittests/unittests.gyp
+++ b/test/unittests/unittests.gyp
@@ -41,7 +41,6 @@
         'cancelable-tasks-unittest.cc',
        'char-predicates-unittest.cc',
        'compiler/branch-elimination-unittest.cc',
-        'compiler/change-lowering-unittest.cc',
        'compiler/coalesced-live-ranges-unittest.cc',
        'compiler/common-operator-reducer-unittest.cc',
        'compiler/common-operator-unittest.cc',