[turbofan] Run everything after representation selection concurrently.

Further refactor the pipeline so that even the first scheduler (part of
the effect control linearization) runs concurrently. This temporarily
disables most of the write barrier elimination; we will bring that back
later.
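
For reference, the concurrent part of the pipeline now runs roughly the
following phase order (a sketch based on the Pipeline::OptimizeGraph
changes below; RunPrintAndVerify calls omitted):

  Run<EffectControlLinearizationPhase>();  // includes the first scheduler
  Run<BranchEliminationPhase>();
  if (FLAG_turbo_cf_optimization) {
    Run<ControlFlowOptimizationPhase>();
  }
  Run<LateOptimizationPhase>();
  Run<LateGraphTrimmingPhase>();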

Drive-by fix: Remove dead code from ChangeLowering, and stack-allocate
the Typer in the pipeline. Also migrate the AllocateStub to a native
code builtin, so that the code object and a handle to it are available
at all times.
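
To illustrate the last point: allocation nodes are now lowered to a
plain call whose target is one of the new builtin code objects (a
sketch mirroring the ChangeLowering::ReduceAllocate diff below):

  Node* target = pretenure == NOT_TENURED
                     ? jsgraph()->AllocateInNewSpaceStubConstant()
                     : jsgraph()->AllocateInOldSpaceStubConstant();
  node->InsertInput(graph()->zone(), 0, target);
  node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
  if (!allocate_operator_.is_set()) {
    CallDescriptor* descriptor =
        Linkage::GetAllocateCallDescriptor(graph()->zone());
    allocate_operator_.set(common()->Call(descriptor));
  }
  NodeProperties::ChangeOp(node, allocate_operator_.get());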

CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux64_tsan_rel
R=mstarzinger@chromium.org
BUG=v8:4969
LOG=n

Review-Url: https://codereview.chromium.org/1926023002
Cr-Commit-Position: refs/heads/master@{#35918}
bmeurer, 2016-04-30 12:00:33 -07:00 (committed by Commit bot)
parent 987bd9ccc7
commit d1b3d426ce
40 changed files with 383 additions and 305 deletions


@ -2661,6 +2661,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r1, r0, r2, r3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Push(r1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r1, r0, r2, r3, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r1, r2);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------


@ -247,13 +247,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state


@ -19,6 +19,7 @@ const Register kReturnRegister1 = {Register::kCode_r1};
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};


@ -2751,6 +2751,42 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(x1, x0, x2, x3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Push(x1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(x1, x0, x2, x3, &runtime, PRETENURE);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(x1, x2);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");


@ -272,13 +272,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function


@ -39,6 +39,7 @@ namespace internal {
#define kReturnRegister2 x2
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20


@ -175,6 +175,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(AllocateInNewSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(AllocateInOldSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@ -443,6 +446,8 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_AllocateInNewSpace(MacroAssembler* masm);
static void Generate_AllocateInOldSpace(MacroAssembler* masm);
static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_CompileBaseline(MacroAssembler* masm);


@ -514,12 +514,6 @@ Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
// static
Callable CodeFactory::Allocate(Isolate* isolate, PretenureFlag pretenure_flag) {
AllocateStub stub(isolate, pretenure_flag);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),


@ -135,7 +135,6 @@ class CodeFactory final {
static Callable Allocate##Type(Isolate* isolate);
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
static Callable Allocate(Isolate* isolate, PretenureFlag pretenure_flag);
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,


@ -1289,16 +1289,6 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(GetParameter(0), HType::Tagged(),
casted_stub()->pretenure_flag(), JS_OBJECT_TYPE);
return result;
}
Handle<Code> AllocateStub::GenerateCode() { return DoGenerateCode(this); }
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
AllocationSiteOverrideMode override_mode,


@ -4056,10 +4056,6 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
void AllocateStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize();
}
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
descriptor->SetMissHandler(ExternalReference(


@ -57,7 +57,6 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(Allocate) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
@ -2758,23 +2757,6 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
class AllocateStub final : public HydrogenCodeStub {
public:
AllocateStub(Isolate* isolate, PretenureFlag pretenure_flag)
: HydrogenCodeStub(isolate) {
set_sub_minor_key(PretenureFlagBits::encode(pretenure_flag));
}
PretenureFlag pretenure_flag() const {
return PretenureFlagBits::decode(sub_minor_key());
}
private:
typedef BitField<PretenureFlag, 0, 1> PretenureFlagBits;
DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate);
DEFINE_HYDROGEN_CODE_STUB(Allocate, HydrogenCodeStub);
};
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(Isolate* isolate,


@ -24,30 +24,25 @@ Reduction ChangeLowering::Reduce(Node* node) {
Node* control = graph()->start();
switch (node->opcode()) {
case IrOpcode::kChangeBitToBool:
return ChangeBitToBool(node->InputAt(0), control);
return ReduceChangeBitToBool(node->InputAt(0), control);
case IrOpcode::kChangeBoolToBit:
return ChangeBoolToBit(node->InputAt(0));
return ReduceChangeBoolToBit(node->InputAt(0));
case IrOpcode::kChangeInt31ToTagged:
return ChangeInt31ToTagged(node->InputAt(0), control);
return ReduceChangeInt31ToTagged(node->InputAt(0), control);
case IrOpcode::kChangeTaggedSignedToInt32:
return ChangeTaggedSignedToInt32(node->InputAt(0));
return ReduceChangeTaggedSignedToInt32(node->InputAt(0));
case IrOpcode::kLoadField:
return LoadField(node);
return ReduceLoadField(node);
case IrOpcode::kStoreField:
return StoreField(node);
return ReduceStoreField(node);
case IrOpcode::kLoadElement:
return LoadElement(node);
return ReduceLoadElement(node);
case IrOpcode::kStoreElement:
return StoreElement(node);
return ReduceStoreElement(node);
case IrOpcode::kAllocate:
return Allocate(node);
return ReduceAllocate(node);
case IrOpcode::kObjectIsSmi:
return ObjectIsSmi(node);
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kChangeFloat64ToTagged:
FATAL("Changes should be already lowered during effect linearization.");
break;
return ReduceObjectIsSmi(node);
default:
return NoChange();
}
@ -55,19 +50,10 @@ Reduction ChangeLowering::Reduce(Node* node) {
return NoChange();
}
Node* ChangeLowering::HeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
Node* ChangeLowering::SmiShiftBitsConstant() {
return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
}
Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
if (machine()->Is64()) {
value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
@ -75,11 +61,6 @@ Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
}
Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(ChangeSmiToWord32(value));
}
Node* ChangeLowering::ChangeSmiToWord32(Node* value) {
value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
if (machine()->Is64()) {
@ -93,47 +74,23 @@ Node* ChangeLowering::ChangeUint32ToFloat64(Node* value) {
return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
}
Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
if (machine()->Is64()) {
value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
}
return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
}
Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
HeapNumberValueIndexConstant(), graph()->start(),
control);
}
Node* ChangeLowering::TestNotSmi(Node* value) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
return graph()->NewNode(machine()->WordAnd(), value,
jsgraph()->IntPtrConstant(kSmiTagMask));
}
Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
Reduction ChangeLowering::ReduceChangeBitToBool(Node* value, Node* control) {
return Replace(
graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
}
Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
Reduction ChangeLowering::ReduceChangeBoolToBit(Node* value) {
return Replace(graph()->NewNode(machine()->WordEqual(), value,
jsgraph()->TrueConstant()));
}
Reduction ChangeLowering::ChangeInt31ToTagged(Node* value, Node* control) {
Reduction ChangeLowering::ReduceChangeInt31ToTagged(Node* value,
Node* control) {
return Replace(ChangeInt32ToSmi(value));
}
Reduction ChangeLowering::ChangeTaggedSignedToInt32(Node* value) {
Reduction ChangeLowering::ReduceChangeTaggedSignedToInt32(Node* value) {
return Replace(ChangeSmiToWord32(value));
}
@ -141,64 +98,35 @@ namespace {
WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
MachineRepresentation representation,
Type* field_type, Type* input_type) {
if (field_type->Is(Type::TaggedSigned()) ||
input_type->Is(Type::TaggedSigned())) {
// Write barriers are only for writes of heap objects.
return kNoWriteBarrier;
}
if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
// Write barriers are not necessary when storing true, false, null or
// undefined, because these special oddballs are always in the root set.
return kNoWriteBarrier;
}
Node* value) {
// TODO(bmeurer): Optimize write barriers based on input.
if (base_is_tagged == kTaggedBase &&
representation == MachineRepresentation::kTagged) {
if (input_type->IsConstant() &&
input_type->AsConstant()->Value()->IsHeapObject()) {
Handle<HeapObject> input =
Handle<HeapObject>::cast(input_type->AsConstant()->Value());
if (input->IsMap()) {
// Write barriers for storing maps are cheaper.
return kMapWriteBarrier;
}
Isolate* const isolate = input->GetIsolate();
RootIndexMap root_index_map(isolate);
int root_index = root_index_map.Lookup(*input);
if (root_index != RootIndexMap::kInvalidRootIndex &&
isolate->heap()->RootIsImmortalImmovable(root_index)) {
// Write barriers are unnecessary for immortal immovable roots.
return kNoWriteBarrier;
}
}
if (field_type->Is(Type::TaggedPointer()) ||
input_type->Is(Type::TaggedPointer())) {
// Write barriers for heap objects don't need a Smi check.
if (value->opcode() == IrOpcode::kHeapConstant) {
return kPointerWriteBarrier;
} else if (value->opcode() == IrOpcode::kNumberConstant) {
double const number_value = OpParameter<double>(value);
if (IsSmiDouble(number_value)) return kNoWriteBarrier;
return kPointerWriteBarrier;
}
// Write barriers are only for writes into heap objects (i.e. tagged base).
return kFullWriteBarrier;
}
return kNoWriteBarrier;
}
WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
MachineRepresentation representation,
int field_offset, Type* field_type,
Type* input_type) {
int field_offset, Node* value) {
if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
// Write barriers for storing maps are cheaper.
return kMapWriteBarrier;
}
return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
input_type);
return ComputeWriteBarrierKind(base_is_tagged, representation, value);
}
} // namespace
Reduction ChangeLowering::LoadField(Node* node) {
Reduction ChangeLowering::ReduceLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
@ -206,13 +134,11 @@ Reduction ChangeLowering::LoadField(Node* node) {
return Changed(node);
}
Reduction ChangeLowering::StoreField(Node* node) {
Reduction ChangeLowering::ReduceStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
Type* type = NodeProperties::GetType(node->InputAt(1));
WriteBarrierKind kind = ComputeWriteBarrierKind(
access.base_is_tagged, access.machine_type.representation(),
access.offset, access.type, type);
access.offset, node->InputAt(1));
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(node,
@ -245,73 +171,41 @@ Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
return index;
}
Reduction ChangeLowering::LoadElement(Node* node) {
Reduction ChangeLowering::ReduceLoadElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
return Changed(node);
}
Reduction ChangeLowering::StoreElement(Node* node) {
Reduction ChangeLowering::ReduceStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
Type* type = NodeProperties::GetType(node->InputAt(2));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(),
ComputeWriteBarrierKind(access.base_is_tagged,
access.machine_type.representation(),
access.type, type))));
node->InputAt(2)))));
return Changed(node);
}
Reduction ChangeLowering::Allocate(Node* node) {
Reduction ChangeLowering::ReduceAllocate(Node* node) {
PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
Callable callable = CodeFactory::Allocate(isolate(), pretenure);
Node* target = jsgraph()->HeapConstant(callable.code());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoThrow);
const Operator* op = common()->Call(descriptor);
Node* target = pretenure == NOT_TENURED
? jsgraph()->AllocateInNewSpaceStubConstant()
: jsgraph()->AllocateInOldSpaceStubConstant();
node->InsertInput(graph()->zone(), 0, target);
node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
NodeProperties::ChangeOp(node, op);
if (!allocate_operator_.is_set()) {
CallDescriptor* descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
allocate_operator_.set(common()->Call(descriptor));
}
NodeProperties::ChangeOp(node, allocate_operator_.get());
return Changed(node);
}
Node* ChangeLowering::IsSmi(Node* value) {
return graph()->NewNode(
machine()->WordEqual(),
graph()->NewNode(machine()->WordAnd(), value,
jsgraph()->IntPtrConstant(kSmiTagMask)),
jsgraph()->IntPtrConstant(kSmiTag));
}
Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
return graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), object,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
graph()->start(), control);
}
Node* ChangeLowering::LoadMapBitField(Node* map) {
return graph()->NewNode(
machine()->Load(MachineType::Uint8()), map,
jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
graph()->start(), graph()->start());
}
Node* ChangeLowering::LoadMapInstanceType(Node* map) {
return graph()->NewNode(
machine()->Load(MachineType::Uint8()), map,
jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
graph()->start(), graph()->start());
}
Reduction ChangeLowering::ObjectIsSmi(Node* node) {
Reduction ChangeLowering::ReduceObjectIsSmi(Node* node) {
node->ReplaceInput(0,
graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
jsgraph()->IntPtrConstant(kSmiTagMask)));


@ -27,38 +27,24 @@ class ChangeLowering final : public Reducer {
Reduction Reduce(Node* node) final;
private:
Node* HeapNumberValueIndexConstant();
Node* SmiShiftBitsConstant();
Node* ChangeInt32ToFloat64(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeSmiToFloat64(Node* value);
Node* ChangeSmiToWord32(Node* value);
Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* LoadHeapNumberValue(Node* value, Node* control);
Node* TestNotSmi(Node* value);
Reduction ChangeBitToBool(Node* value, Node* control);
Reduction ChangeBoolToBit(Node* value);
Reduction ChangeFloat64ToTagged(Node* value, Node* control);
Reduction ChangeInt31ToTagged(Node* value, Node* control);
Reduction ChangeInt32ToTagged(Node* value, Node* control);
Reduction ChangeTaggedSignedToInt32(Node* value);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
Reduction ReduceChangeBitToBool(Node* value, Node* control);
Reduction ReduceChangeBoolToBit(Node* value);
Reduction ReduceChangeInt31ToTagged(Node* value, Node* control);
Reduction ReduceChangeTaggedSignedToInt32(Node* value);
Reduction LoadField(Node* node);
Reduction StoreField(Node* node);
Reduction LoadElement(Node* node);
Reduction StoreElement(Node* node);
Reduction Allocate(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
Reduction ReduceAllocate(Node* node);
Node* IsSmi(Node* value);
Node* LoadHeapObjectMap(Node* object, Node* control);
Node* LoadMapBitField(Node* map);
Node* LoadMapInstanceType(Node* map);
Reduction ObjectIsSmi(Node* node);
Reduction ReduceObjectIsSmi(Node* node);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
@ -68,7 +54,7 @@ class ChangeLowering final : public Reducer {
MachineOperatorBuilder* machine() const;
JSGraph* const jsgraph_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
SetOncePointer<const Operator> allocate_operator_;
};
} // namespace compiler


@ -17,7 +17,7 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
}


@ -839,9 +839,9 @@ EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
Node* control) {
effect = graph()->NewNode(common()->BeginRegion(), effect);
Node* result = effect =
graph()->NewNode(simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(HeapNumber::kSize), effect, control);
Node* result = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
result, jsgraph()->HeapNumberMapConstant(), effect,
control);


@ -513,6 +513,16 @@ Constant::Constant(RelocatablePtrConstantInfo info)
}
#endif
Handle<HeapObject> Constant::ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
Handle<HeapObject> value(
bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
return value;
}
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:


@ -1000,10 +1000,7 @@ class Constant final {
return RpoNumber::FromInt(static_cast<int>(value_));
}
Handle<HeapObject> ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
}
Handle<HeapObject> ToHeapObject() const;
private:
Type type_;


@ -14,6 +14,15 @@ namespace compiler {
#define CACHED(name, expr) \
cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
Node* JSGraph::AllocateInNewSpaceStubConstant() {
return CACHED(kAllocateInNewSpaceStubConstant,
HeapConstant(isolate()->builtins()->AllocateInNewSpace()));
}
Node* JSGraph::AllocateInOldSpaceStubConstant() {
return CACHED(kAllocateInOldSpaceStubConstant,
HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
}
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
@ -81,9 +90,6 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
Node** loc = cache_.FindHeapConstant(value);
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->HeapConstant(value));


@ -39,6 +39,8 @@ class JSGraph : public ZoneObject {
}
// Canonicalized global constants.
Node* AllocateInNewSpaceStubConstant();
Node* AllocateInOldSpaceStubConstant();
Node* CEntryStubConstant(int result_size);
Node* EmptyFixedArrayConstant();
Node* HeapNumberMapConstant();
@ -140,6 +142,8 @@ class JSGraph : public ZoneObject {
private:
enum CachedNode {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
kCEntryStubConstant,
kEmptyFixedArrayConstant,
kHeapNumberMapConstant,


@ -404,6 +404,35 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
descriptor.DebugName(isolate));
}
// static
CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
LocationSignature::Builder locations(zone, 1, 1);
MachineSignature::Builder types(zone, 1, 1);
locations.AddParam(regloc(kAllocateSizeRegister));
types.AddParam(MachineType::Int32());
locations.AddReturn(regloc(kReturnRegister0));
types.AddReturn(MachineType::AnyTagged());
// The target for allocate calls is a code object.
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
types.Build(), // machine_sig
locations.Build(), // location_sig
0, // stack_parameter_count
Operator::kNoThrow, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
CallDescriptor::kCanUseRoots, // flags
"Allocate");
}
// static
CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count) {


@ -333,6 +333,7 @@ class Linkage : public ZoneObject {
MachineType return_type = MachineType::AnyTagged(),
size_t return_count = 1);
static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count);


@ -1314,13 +1314,11 @@ bool Pipeline::CreateGraph() {
}
// Type the graph.
base::SmartPointer<Typer> typer;
typer.Reset(new Typer(isolate(), data->graph(),
info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies()));
Run<TyperPhase>(typer.get());
Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies());
Run<TyperPhase>(&typer);
RunPrintAndVerify("Typed");
BeginPhaseKind("lowering");
@ -1347,26 +1345,6 @@ bool Pipeline::CreateGraph() {
Run<EarlyOptimizationPhase>();
RunPrintAndVerify("Early optimized");
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized");
Run<BranchEliminationPhase>();
RunPrintAndVerify("Branch conditions eliminated");
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
RunPrintAndVerify("Control flow optimized");
}
// Lower changes that have been inserted before.
Run<LateOptimizationPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late optimized", true);
// Kill the Typer and thereby uninstall the decorator (if any).
typer.Reset(nullptr);
EndPhaseKind();
return true;
@ -1377,6 +1355,23 @@ bool Pipeline::OptimizeGraph(Linkage* linkage) {
BeginPhaseKind("block building");
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized", true);
Run<BranchEliminationPhase>();
RunPrintAndVerify("Branch conditions eliminated", true);
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
RunPrintAndVerify("Control flow optimized", true);
}
// Lower changes that have been inserted before.
Run<LateOptimizationPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late optimized", true);
Run<LateGraphTrimmingPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late trimmed", true);


@ -1108,7 +1108,7 @@ class RepresentationSelector {
break;
}
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTagged);
break;


@ -214,6 +214,15 @@ struct SimplifiedOperatorGlobalCache final {
PURE_OP_LIST(PURE)
#undef PURE
template <PretenureFlag kPretenure>
struct AllocateOperator final : public Operator1<PretenureFlag> {
AllocateOperator()
: Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
"Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
};
AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
AllocateOperator<TENURED> kAllocateTenuredOperator;
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
@ -258,9 +267,14 @@ const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
return new (zone())
Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
"Allocate", 1, 1, 1, 1, 1, 0, pretenure);
switch (pretenure) {
case NOT_TENURED:
return &cache_.kAllocateNotTenuredOperator;
case TENURED:
return &cache_.kAllocateTenuredOperator;
}
UNREACHABLE();
return nullptr;
}


@ -43,6 +43,10 @@ class HandleBase {
V8_INLINE bool is_null() const { return location_ == nullptr; }
// Returns the raw address where this handle is stored. This should only be
// used for hashing handles; do not ever try to dereference it.
V8_INLINE Address address() const { return bit_cast<Address>(location_); }
protected:
// Provides the C++ dereference operator.
V8_INLINE Object* operator*() const {
@ -132,14 +136,14 @@ class Handle final : public HandleBase {
// Provide function object for location equality comparison.
struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
return lhs.location() == rhs.location();
return lhs.address() == rhs.address();
}
};
// Provide function object for location hashing.
struct hash : public std::unary_function<Handle<T>, size_t> {
V8_INLINE size_t operator()(Handle<T> const& handle) const {
return base::hash<void*>()(handle.location());
return base::hash<void*>()(handle.address());
}
};


@ -2618,6 +2618,44 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
// -- esp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
__ Move(esi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
// -- esp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(ecx);
__ Move(esi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------


@ -251,13 +251,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state


@ -19,6 +19,7 @@ const Register kReturnRegister1 = {Register::kCode_edx};
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};


@ -58,7 +58,6 @@ class PlatformInterfaceDescriptor;
V(AllocateInt8x16) \
V(AllocateUint8x16) \
V(AllocateBool8x16) \
V(Allocate) \
V(ArrayConstructorConstantArgCount) \
V(ArrayConstructor) \
V(InternalArrayConstructorConstantArgCount) \
@ -578,11 +577,6 @@ class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
};
class ArrayConstructorConstantArgCountDescriptor
: public CallInterfaceDescriptor {


@ -2736,6 +2736,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.


@ -246,13 +246,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state


@ -18,6 +18,7 @@ const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};


@ -2724,6 +2724,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.


@ -246,13 +246,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state


@ -18,6 +18,7 @@ const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};


@ -2042,6 +2042,44 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : requested object size (untagged)
// -- rsp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(rdx, rax, rcx, rdi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ Integer32ToSmi(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
__ Move(rsi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : requested object size (untagged)
// -- rsp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(rdx, rax, rcx, rdi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ Integer32ToSmi(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(rcx);
__ Move(rsi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------


@ -242,13 +242,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state


@ -21,6 +21,7 @@ const Register kReturnRegister1 = {Register::kCode_rdx};
const Register kReturnRegister2 = {Register::kCode_r8};
const Register kJSFunctionRegister = {Register::kCode_rdi};
const Register kContextRegister = {Register::kCode_rsi};
const Register kAllocateSizeRegister = {Register::kCode_rdx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};


@ -1370,7 +1370,7 @@ TEST(LowerStoreField_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
@ -1435,7 +1435,7 @@ TEST(LowerStoreElement_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}