v8/src/compiler/effect-control-linearizer.h


// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_

#include "src/compiler/common-operator.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Forward declarations.
class Callable;
class Zone;

namespace compiler {

class CommonOperatorBuilder;
class SimplifiedOperatorBuilder;
class MachineOperatorBuilder;
class JSGraph;
class Graph;
class Schedule;
class SourcePositionTable;
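
// The EffectControlLinearizer runs after scheduling. It walks the given
// schedule, wires effect and control dependencies that earlier phases left
// implicit back into the graph, and lowers the remaining simplified
// operators (the Lower* methods below) into machine-level subgraphs via
// the GraphAssembler.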
class V8_EXPORT_PRIVATE EffectControlLinearizer {
 public:
  EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
                          SourcePositionTable* source_positions);

  void Run();

 private:
  void ProcessNode(Node* node, Node** frame_state, Node** effect,
                   Node** control);

  bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
                            Node** control);
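
  // Lowerings of simplified operators; lowerings that can deoptimize take
  // the current {frame_state} in addition to the node.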
  Node* LowerChangeBitToTagged(Node* node);
  Node* LowerChangeInt31ToTaggedSigned(Node* node);
  Node* LowerChangeInt32ToTagged(Node* node);
  Node* LowerChangeUint32ToTagged(Node* node);
  Node* LowerChangeFloat64ToTagged(Node* node);
  Node* LowerChangeFloat64ToTaggedPointer(Node* node);
  Node* LowerChangeTaggedSignedToInt32(Node* node);
  Node* LowerChangeTaggedToBit(Node* node);
  Node* LowerChangeTaggedToInt32(Node* node);
  Node* LowerChangeTaggedToUint32(Node* node);
  Node* LowerChangeTaggedToTaggedSigned(Node* node);
  Node* LowerCheckBounds(Node* node, Node* frame_state);
  Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
  Node* LowerCheckMaps(Node* node, Node* frame_state);
  Node* LowerCompareMaps(Node* node);
  Node* LowerCheckNumber(Node* node, Node* frame_state);
  Node* LowerCheckReceiver(Node* node, Node* frame_state);
  Node* LowerCheckString(Node* node, Node* frame_state);
  Node* LowerCheckSeqString(Node* node, Node* frame_state);
  Node* LowerCheckSymbol(Node* node, Node* frame_state);
  Node* LowerCheckIf(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
  Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
  Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
  Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
  Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
  Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
  Node* LowerChangeTaggedToFloat64(Node* node);
  Node* LowerTruncateTaggedToBit(Node* node);
  Node* LowerTruncateTaggedPointerToBit(Node* node);
  Node* LowerTruncateTaggedToFloat64(Node* node);
  Node* LowerTruncateTaggedToWord32(Node* node);
  Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
  Node* LowerObjectIsArrayBufferView(Node* node);
  Node* LowerObjectIsCallable(Node* node);
  Node* LowerObjectIsConstructor(Node* node);
  Node* LowerObjectIsDetectableCallable(Node* node);
  Node* LowerObjectIsMinusZero(Node* node);
  Node* LowerObjectIsNaN(Node* node);
  Node* LowerObjectIsNonCallable(Node* node);
  Node* LowerObjectIsNumber(Node* node);
  Node* LowerObjectIsReceiver(Node* node);
  Node* LowerObjectIsSmi(Node* node);
  Node* LowerObjectIsString(Node* node);
  Node* LowerObjectIsSymbol(Node* node);
  Node* LowerObjectIsUndetectable(Node* node);
  Node* LowerArgumentsFrame(Node* node);
  Node* LowerArgumentsLength(Node* node);
  Node* LowerNewDoubleElements(Node* node);
  Node* LowerNewSmiOrObjectElements(Node* node);
  Node* LowerNewArgumentsElements(Node* node);
  Node* LowerArrayBufferWasNeutered(Node* node);
  Node* LowerStringCharAt(Node* node);
  Node* LowerStringCharCodeAt(Node* node);
  Node* LowerSeqStringCharCodeAt(Node* node);
  Node* LowerStringToLowerCaseIntl(Node* node);
  Node* LowerStringToUpperCaseIntl(Node* node);
  Node* LowerStringFromCharCode(Node* node);
  Node* LowerStringFromCodePoint(Node* node);
  Node* LowerStringIndexOf(Node* node);
  Node* LowerStringEqual(Node* node);
  Node* LowerStringLessThan(Node* node);
  Node* LowerStringLessThanOrEqual(Node* node);
  Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
  Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
  Node* LowerConvertTaggedHoleToUndefined(Node* node);
  Node* LowerPlainPrimitiveToNumber(Node* node);
  Node* LowerPlainPrimitiveToWord32(Node* node);
  Node* LowerPlainPrimitiveToFloat64(Node* node);
  Node* LowerEnsureWritableFastElements(Node* node);
  Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
  void LowerTransitionElementsKind(Node* node);
  Node* LowerLoadFieldByIndex(Node* node);
  Node* LowerLoadTypedElement(Node* node);
  void LowerStoreTypedElement(Node* node);
  void LowerStoreSignedSmallElement(Node* node);
  Node* LowerFindOrderedHashMapEntry(Node* node);
  Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
  void LowerTransitionAndStoreElement(Node* node);
  void LowerRuntimeAbort(Node* node);

  // Lowering of optional operators.
  Maybe<Node*> LowerFloat64RoundUp(Node* node);
  Maybe<Node*> LowerFloat64RoundDown(Node* node);
  Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
  Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
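
  // Helper builders shared by the lowering methods above.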
  Node* AllocateHeapNumberWithValue(Node* node);
  Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
                                   Node* frame_state);
  Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
                                                 Node* value,
                                                 Node* frame_state);
  Node* BuildFloat64RoundDown(Node* value);
  Node* ComputeIntegerHash(Node* value);
  Node* LowerStringComparison(Callable const& callable, Node* node);
  Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);

  Node* ChangeInt32ToSmi(Node* value);
  Node* ChangeIntPtrToInt32(Node* value);
  Node* ChangeUint32ToUintPtr(Node* value);
  Node* ChangeUint32ToSmi(Node* value);
  Node* ChangeSmiToIntPtr(Node* value);
  Node* ChangeSmiToInt32(Node* value);
  Node* ObjectIsSmi(Node* value);

  Node* SmiMaxValueConstant();
  Node* SmiShiftBitsConstant();
  void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
                            ElementsKind to);
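
  // Accessors for the per-compilation state and the operator builders.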
  Factory* factory() const;
  Isolate* isolate() const;
  JSGraph* jsgraph() const { return js_graph_; }
  Graph* graph() const;
  Schedule* schedule() const { return schedule_; }
  Zone* temp_zone() const { return temp_zone_; }
  CommonOperatorBuilder* common() const;
  SimplifiedOperatorBuilder* simplified() const;
  MachineOperatorBuilder* machine() const;
  GraphAssembler* gasm() { return &graph_assembler_; }

  JSGraph* js_graph_;
  Schedule* schedule_;
  Zone* temp_zone_;
  RegionObservability region_observability_ = RegionObservability::kObservable;
  SourcePositionTable* source_positions_;
  GraphAssembler graph_assembler_;
  Node* frame_state_zapper_;  // For tracking down compiler::Node::New crashes.
};
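
// Usage sketch (assuming an already-scheduled graph; {jsgraph}, {schedule},
// {temp_zone} and {source_positions} are placeholders for values owned by
// the caller, typically the compilation pipeline):
//
//   EffectControlLinearizer linearizer(jsgraph, schedule, temp_zone,
//                                      source_positions);
//   linearizer.Run();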

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_