// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/api.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/heap/heap-inl.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/macro-assembler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

template <typename T>
using TNode = compiler::TNode<T>;

// -----------------------------------------------------------------------------
// Interrupt and stack checks.

void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kInterrupt);
}

void Builtins::Generate_StackCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kStackGuard);
}

// -----------------------------------------------------------------------------
// TurboFan support builtins.

TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);

  // Load the {object}s elements.
  Node* source = LoadObjectField(object, JSObject::kElementsOffset);
  Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
  StoreObjectField(object, JSObject::kElementsOffset, target);
  Return(target);
}

TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);
  Node* key = Parameter(Descriptor::kKey);
  Node* context = Parameter(Descriptor::kContext);

  Label runtime(this, Label::kDeferred);
  Node* elements = LoadElements(object);
  elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS,
                                     key, &runtime);
  Return(elements);

  BIND(&runtime);
  TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}

TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);
  Node* key = Parameter(Descriptor::kKey);
  Node* context = Parameter(Descriptor::kContext);

  Label runtime(this, Label::kDeferred);
  Node* elements = LoadElements(object);
  elements =
      TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime);
  Return(elements);

  BIND(&runtime);
  TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}

TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
  Node* frame = Parameter(Descriptor::kFrame);
  TNode<IntPtrT> length = SmiToIntPtr(Parameter(Descriptor::kLength));
  TNode<IntPtrT> mapped_count =
      SmiToIntPtr(Parameter(Descriptor::kMappedCount));

  // Check if we can allocate in new space.
  ElementsKind kind = PACKED_ELEMENTS;
  int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
  Label if_newspace(this), if_oldspace(this, Label::kDeferred);
  Branch(IntPtrLessThan(length, IntPtrConstant(max_elements)), &if_newspace,
         &if_oldspace);

  BIND(&if_newspace);
  {
    // Prefer EmptyFixedArray in case of non-positive {length} (the {length}
    // can be negative here for rest parameters).
    Label if_empty(this), if_notempty(this);
    Branch(IntPtrLessThanOrEqual(length, IntPtrConstant(0)), &if_empty,
           &if_notempty);

    BIND(&if_empty);
    Return(EmptyFixedArrayConstant());

    BIND(&if_notempty);
    {
      // Allocate a FixedArray in new space.
      TNode<FixedArray> result = CAST(AllocateFixedArray(kind, length));

      // The elements might be used to back mapped arguments. In that case fill
      // the mapped elements (i.e. the first {mapped_count}) with the hole, but
      // make sure not to overshoot the {length} if some arguments are missing.
      TNode<IntPtrT> number_of_holes = IntPtrMin(mapped_count, length);
      Node* the_hole = TheHoleConstant();

      // Fill the first elements up to {number_of_holes} with the hole.
      TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
      Label loop1(this, &var_index), done_loop1(this);
      Goto(&loop1);
      BIND(&loop1);
      {
        // Load the current {index}.
        TNode<IntPtrT> index = var_index.value();

        // Check if we are done.
        GotoIf(WordEqual(index, number_of_holes), &done_loop1);

        // Store the hole into the {result}.
        StoreFixedArrayElement(result, index, the_hole, SKIP_WRITE_BARRIER);

        // Continue with next {index}.
        var_index = IntPtrAdd(index, IntPtrConstant(1));
        Goto(&loop1);
      }
      BIND(&done_loop1);

      // Compute the effective {offset} into the {frame}.
      TNode<IntPtrT> offset = IntPtrAdd(length, IntPtrConstant(1));

      // Copy the parameters from {frame} (starting at {offset}) to {result}.
      Label loop2(this, &var_index), done_loop2(this);
      Goto(&loop2);
      BIND(&loop2);
      {
        // Load the current {index}.
        TNode<IntPtrT> index = var_index.value();

        // Check if we are done.
        GotoIf(WordEqual(index, length), &done_loop2);

        // Load the parameter at the given {index}.
        TNode<Object> value =
            CAST(Load(MachineType::AnyTagged(), frame,
                      TimesPointerSize(IntPtrSub(offset, index))));

        // Store the {value} into the {result}.
        StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER);

        // Continue with next {index}.
        var_index = IntPtrAdd(index, IntPtrConstant(1));
        Goto(&loop2);
      }
      BIND(&done_loop2);

      Return(result);
    }
  }

  BIND(&if_oldspace);
  {
    // Allocate in old space (or large object space).
    TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(),
                    BitcastWordToTagged(frame), SmiFromIntPtr(length),
                    SmiFromIntPtr(mapped_count));
  }
}

TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
  Return(Parameter(Descriptor::kReceiver));
}
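
// Note: DebugBreakTrampoline is installed as the code of a JSFunction whose
// SharedFunctionInfo has the break-at-entry flag set. It calls into the
// runtime to trigger the debugger and then tail-calls the function's real
// code held by the SharedFunctionInfo.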
TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
  Label tailcall_to_shared(this);
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
  TNode<Int32T> arg_count =
      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
  TNode<JSFunction> function = CAST(Parameter(Descriptor::kJSTarget));

  // Check break-at-entry flag on the debug info.
  TNode<SharedFunctionInfo> shared =
      CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
  TNode<Object> maybe_heap_object_or_smi =
      LoadObjectField(shared, SharedFunctionInfo::kScriptOrDebugInfoOffset);
  TNode<HeapObject> maybe_debug_info =
      TaggedToHeapObject(maybe_heap_object_or_smi, &tailcall_to_shared);
  GotoIfNot(HasInstanceType(maybe_debug_info, InstanceType::DEBUG_INFO_TYPE),
            &tailcall_to_shared);

  {
    TNode<DebugInfo> debug_info = CAST(maybe_debug_info);
    TNode<Smi> flags =
        CAST(LoadObjectField(debug_info, DebugInfo::kFlagsOffset));
    GotoIfNot(SmiToInt32(SmiAnd(flags, SmiConstant(DebugInfo::kBreakAtEntry))),
              &tailcall_to_shared);

    CallRuntime(Runtime::kDebugBreakAtEntry, context, function);
    Goto(&tailcall_to_shared);
  }

  BIND(&tailcall_to_shared);
  // Tail call into code object on the SharedFunctionInfo.
  TNode<Code> code = GetSharedFunctionInfoCode(shared);
  TailCallJSCode(code, context, function, new_target, arg_count);
}

class RecordWriteCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit RecordWriteCodeStubAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  Node* IsMarking() {
    Node* is_marking_addr = ExternalConstant(
        ExternalReference::heap_is_marking_flag_address(this->isolate()));
    return Load(MachineType::Uint8(), is_marking_addr);
  }
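
  // Note: MemoryChunk pages are power-of-two aligned, so masking an object
  // address with ~kPageAlignmentMask yields the start of its page, where the
  // chunk header (flags word, mark bitmap) can be read at fixed offsets.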
  Node* IsPageFlagSet(Node* object, int mask) {
    Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
    Node* flags = Load(MachineType::Pointer(), page,
                       IntPtrConstant(MemoryChunk::kFlagsOffset));
    return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
                        IntPtrConstant(0));
  }

  Node* IsWhite(Node* object) {
    DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
    Node* cell;
    Node* mask;
    GetMarkBit(object, &cell, &mask);
    mask = TruncateIntPtrToInt32(mask);
    // Non-white has 1 for the first bit, so we only need to check for the
    // first bit.
    return Word32Equal(Word32And(Load(MachineType::Int32(), cell), mask),
                       Int32Constant(0));
  }

  void GetMarkBit(Node* object, Node** cell, Node** mask) {
    Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
    Node* bitmap = Load(MachineType::Pointer(), page,
                        IntPtrConstant(MemoryChunk::kMarkBitmapOffset));

    {
      // Temp variable to calculate cell offset in bitmap.
      Node* r0;
      int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
                  Bitmap::kBytesPerCellLog2;
      r0 = WordShr(object, IntPtrConstant(shift));
      r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
                                      ~(Bitmap::kBytesPerCell - 1)));
      *cell = IntPtrAdd(bitmap, r0);
    }
    {
      // Temp variable to calculate bit offset in cell.
      Node* r1;
      r1 = WordShr(object, IntPtrConstant(kPointerSizeLog2));
      r1 = WordAnd(r1, IntPtrConstant((1 << Bitmap::kBitsPerCellLog2) - 1));
      // The shift instruction only consumes the low byte of the shift count
      // (e.g. cl on x86), so no manual masking of {r1} is needed. Uncomment
      // the following line otherwise.
      // WordAnd(r1, IntPtrConstant((1 << kBitsPerByte) - 1)));
      *mask = WordShl(IntPtrConstant(1), r1);
    }
  }
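
  // In effect (a sketch of the arithmetic above): with one mark bit per
  // pointer-sized word on the page,
  //   cell = bitmap + cell_index * Bitmap::kBytesPerCell, where
  //   cell_index = (object & kPageAlignmentMask) >> kPointerSizeLog2
  //                                              >> Bitmap::kBitsPerCellLog2
  //   mask = 1 << ((object >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1))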

  Node* ShouldSkipFPRegs(Node* mode) {
    return WordEqual(mode, SmiConstant(kDontSaveFPRegs));
  }

  Node* ShouldEmitRememberSet(Node* remembered_set) {
    return WordEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
  }
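
  // The *Mode helpers below branch on the dynamic {mode} Smi, so a single
  // stub can serve both callers that need the floating point registers saved
  // across the C call and callers that do not.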
  void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type,
                                                  MachineType arg0_type,
                                                  Node* function, Node* arg0,
                                                  Node* mode, Label* next) {
    Label dont_save_fp(this), save_fp(this);
    Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
    BIND(&dont_save_fp);
    {
      CallCFunction1WithCallerSavedRegisters(return_type, arg0_type, function,
                                             arg0, kDontSaveFPRegs);
      Goto(next);
    }

    BIND(&save_fp);
    {
      CallCFunction1WithCallerSavedRegisters(return_type, arg0_type, function,
                                             arg0, kSaveFPRegs);
      Goto(next);
    }
  }

  void CallCFunction3WithCallerSavedRegistersMode(
      MachineType return_type, MachineType arg0_type, MachineType arg1_type,
      MachineType arg2_type, Node* function, Node* arg0, Node* arg1,
      Node* arg2, Node* mode, Label* next) {
    Label dont_save_fp(this), save_fp(this);
    Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
    BIND(&dont_save_fp);
    {
      CallCFunction3WithCallerSavedRegisters(return_type, arg0_type, arg1_type,
                                             arg2_type, function, arg0, arg1,
                                             arg2, kDontSaveFPRegs);
      Goto(next);
    }

    BIND(&save_fp);
    {
      CallCFunction3WithCallerSavedRegisters(return_type, arg0_type, arg1_type,
                                             arg2_type, function, arg0, arg1,
                                             arg2, kSaveFPRegs);
      Goto(next);
    }
  }

  void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
                                  Label* next) {
    Node* store_buffer_top_addr =
        ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
    Node* store_buffer_top =
        Load(MachineType::Pointer(), store_buffer_top_addr);
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
                        slot);
    Node* new_store_buffer_top =
        IntPtrAdd(store_buffer_top, IntPtrConstant(kPointerSize));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(),
                        store_buffer_top_addr, new_store_buffer_top);

    Node* test = WordAnd(new_store_buffer_top,
                         IntPtrConstant(Heap::store_buffer_mask_constant()));

    Label overflow(this);
    Branch(WordEqual(test, IntPtrConstant(0)), &overflow, next);

    BIND(&overflow);
    {
      Node* function =
          ExternalConstant(ExternalReference::store_buffer_overflow_function());
      CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
                                                 MachineType::Pointer(),
                                                 function, isolate, mode, next);
    }
  }
};
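
// RecordWrite is the out-of-line write barrier stub: depending on the
// {remembered_set} mode it records an old-to-new slot in the store buffer
// and/or informs the incremental marker about the written value.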
TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
  Label generational_wb(this);
  Label incremental_wb(this);
  Label exit(this);

  Node* remembered_set = Parameter(Descriptor::kRememberedSet);
  Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
         &incremental_wb);

  BIND(&generational_wb);
  {
    Label test_old_to_new_flags(this);
    Label store_buffer_exit(this), store_buffer_incremental_wb(this);

    // When incremental marking is not on, we skip cross generation pointer
    // checking here, because there are checks for
    // `kPointersFromHereAreInterestingMask` and
    // `kPointersToHereAreInterestingMask` in
    // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this
    // stub, which serve as the cross generation check.
    Node* slot = Parameter(Descriptor::kSlot);
    Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit);

    BIND(&test_old_to_new_flags);
    {
      Node* value = Load(MachineType::Pointer(), slot);

      // TODO(albertnetymk): Try to cache the page flag for value and object,
      // instead of calling IsPageFlagSet each time.
      Node* value_in_new_space =
          IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask);
      GotoIfNot(value_in_new_space, &incremental_wb);

      Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
      Node* object_in_new_space =
          IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask);
      Branch(object_in_new_space, &incremental_wb,
             &store_buffer_incremental_wb);
    }

    BIND(&store_buffer_exit);
    {
      Node* isolate_constant =
          ExternalConstant(ExternalReference::isolate_address(isolate()));
      Node* fp_mode = Parameter(Descriptor::kFPMode);
      InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
    }

    BIND(&store_buffer_incremental_wb);
    {
      Node* isolate_constant =
          ExternalConstant(ExternalReference::isolate_address(isolate()));
      Node* fp_mode = Parameter(Descriptor::kFPMode);
      InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
                                 &incremental_wb);
    }
  }

  BIND(&incremental_wb);
  {
    Label call_incremental_wb(this);

    Node* slot = Parameter(Descriptor::kSlot);
    Node* value = Load(MachineType::Pointer(), slot);

    // There are two cases where we need to call the incremental write
    // barrier.
    // 1) value_is_white
    GotoIf(IsWhite(value), &call_incremental_wb);

    // 2) is_compacting && value_in_EC && obj_isnt_skip
    // is_compacting = true when is_marking = true
    GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
              &exit);

    Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
    Branch(
        IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
        &exit, &call_incremental_wb);

    BIND(&call_incremental_wb);
    {
      Node* function = ExternalConstant(
          ExternalReference::incremental_marking_record_write_function());
      Node* isolate_constant =
          ExternalConstant(ExternalReference::isolate_address(isolate()));
      Node* fp_mode = Parameter(Descriptor::kFPMode);
      CallCFunction3WithCallerSavedRegistersMode(
          MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
          MachineType::Pointer(), function, object, slot, isolate_constant,
          fp_mode, &exit);
    }
  }

  BIND(&exit);
  Return(TrueConstant());
}

class DeletePropertyBaseAssembler : public AccessorAssembler {
 public:
  explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
      : AccessorAssembler(state) {}

  void DeleteDictionaryProperty(TNode<Object> receiver,
                                TNode<NameDictionary> properties,
                                TNode<Name> name, TNode<Context> context,
                                Label* dont_delete, Label* notfound) {
    TVARIABLE(IntPtrT, var_name_index);
    Label dictionary_found(this, &var_name_index);
    NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
                                         &var_name_index, notfound);

    BIND(&dictionary_found);
    TNode<IntPtrT> key_index = var_name_index.value();
    TNode<Uint32T> details =
        LoadDetailsByKeyIndex<NameDictionary>(properties, key_index);
    GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
           dont_delete);
    // Overwrite the entry itself (see NameDictionary::SetEntry).
    TNode<HeapObject> filler = TheHoleConstant();
    DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kTheHoleValue));
    StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
    StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
                                         SKIP_WRITE_BARRIER);
    StoreDetailsByKeyIndex<NameDictionary>(properties, key_index,
                                           SmiConstant(0));

    // Update bookkeeping information (see NameDictionary::ElementRemoved).
    TNode<Smi> nof = GetNumberOfElements<NameDictionary>(properties);
    TNode<Smi> new_nof = SmiSub(nof, SmiConstant(1));
    SetNumberOfElements<NameDictionary>(properties, new_nof);
    TNode<Smi> num_deleted =
        GetNumberOfDeletedElements<NameDictionary>(properties);
    TNode<Smi> new_deleted = SmiAdd(num_deleted, SmiConstant(1));
    SetNumberOfDeletedElements<NameDictionary>(properties, new_deleted);

    // Shrink the dictionary if necessary (see NameDictionary::Shrink).
    Label shrinking_done(this);
    TNode<Smi> capacity = GetCapacity<NameDictionary>(properties);
    GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
    GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);
    CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver);
    Goto(&shrinking_done);
    BIND(&shrinking_done);

    Return(TrueConstant());
  }
};
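
// Fast path for the `delete` operator: only dictionary-mode receivers are
// handled inline (see DeleteDictionaryProperty above); everything else,
// including fast-properties objects, falls back to the runtime.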
TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
  TNode<Object> receiver = CAST(Parameter(Descriptor::kObject));
  TNode<Object> key = CAST(Parameter(Descriptor::kKey));
  TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode));
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));

  VARIABLE(var_index, MachineType::PointerRepresentation());
  VARIABLE(var_unique, MachineRepresentation::kTagged, key);
  Label if_index(this), if_unique_name(this), if_notunique(this),
      if_notfound(this), slow(this);

  GotoIf(TaggedIsSmi(receiver), &slow);
  TNode<Map> receiver_map = LoadMap(CAST(receiver));
  TNode<Int32T> instance_type = LoadMapInstanceType(receiver_map);
  GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &slow);
  TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
            &if_notunique);

  BIND(&if_index);
  {
    Comment("integer index");
    Goto(&slow);  // TODO(jkummerow): Implement more smarts here.
  }

  BIND(&if_unique_name);
  {
    Comment("key is unique name");
    TNode<Name> unique = CAST(var_unique.value());
    CheckForAssociatedProtector(unique, &slow);

    Label dictionary(this), dont_delete(this);
    GotoIf(IsDictionaryMap(receiver_map), &dictionary);

    // Fast properties need to clear recorded slots, which can only be done
    // in C++.
    Goto(&slow);

    BIND(&dictionary);
    {
      InvalidateValidityCellIfPrototype(receiver_map);

      TNode<NameDictionary> properties =
          CAST(LoadSlowProperties(CAST(receiver)));
      DeleteDictionaryProperty(receiver, properties, unique, context,
                               &dont_delete, &if_notfound);
    }

    BIND(&dont_delete);
    {
      STATIC_ASSERT(LanguageModeSize == 2);
      GotoIf(SmiNotEqual(language_mode, SmiConstant(LanguageMode::kSloppy)),
             &slow);
      Return(FalseConstant());
    }
  }

  BIND(&if_notunique);
  {
    // If the string was not found in the string table, then no object can
    // have a property with that name.
    TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
                         &var_unique, &if_notfound, &slow);
  }

  BIND(&if_notfound);
  Return(TrueConstant());

  BIND(&slow);
  {
    TailCallRuntime(Runtime::kDeleteProperty, context, receiver, key,
                    language_mode);
  }
}

TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* context = Parameter(Descriptor::kContext);

  Label if_empty(this), if_runtime(this, Label::kDeferred);
  Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
  Return(receiver_map);

  BIND(&if_empty);
  Return(EmptyFixedArrayConstant());

  BIND(&if_runtime);
  TailCallRuntime(Runtime::kForInEnumerate, context, receiver);
}

TF_BUILTIN(ForInFilter, CodeStubAssembler) {
  Node* key = Parameter(Descriptor::kKey);
  Node* object = Parameter(Descriptor::kObject);
  Node* context = Parameter(Descriptor::kContext);

  CSA_ASSERT(this, IsString(key));

  Label if_true(this), if_false(this);
  TNode<Oddball> result = HasProperty(context, object, key, kForInHasProperty);
  Branch(IsTrue(result), &if_true, &if_false);

  BIND(&if_true);
  Return(key);

  BIND(&if_false);
  Return(UndefinedConstant());
}

TF_BUILTIN(SameValue, CodeStubAssembler) {
  Node* lhs = Parameter(Descriptor::kLeft);
  Node* rhs = Parameter(Descriptor::kRight);

  Label if_true(this), if_false(this);
  BranchIfSameValue(lhs, rhs, &if_true, &if_false);

  BIND(&if_true);
  Return(TrueConstant());

  BIND(&if_false);
  Return(FalseConstant());
}

class InternalBuiltinsAssembler : public CodeStubAssembler {
 public:
  explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  TNode<MicrotaskQueue> GetDefaultMicrotaskQueue();
  TNode<IntPtrT> GetPendingMicrotaskCount(
      TNode<MicrotaskQueue> microtask_queue);
  void SetPendingMicrotaskCount(TNode<MicrotaskQueue> microtask_queue,
                                TNode<IntPtrT> new_num_tasks);
  TNode<FixedArray> GetQueuedMicrotasks(TNode<MicrotaskQueue> microtask_queue);
  void SetQueuedMicrotasks(TNode<MicrotaskQueue> microtask_queue,
                           TNode<FixedArray> new_queue);

  TNode<Context> GetCurrentContext();
  void SetCurrentContext(TNode<Context> context);

  void EnterMicrotaskContext(TNode<Context> context);
  void LeaveMicrotaskContext();

  void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
                      SloppyTNode<HeapObject> promise_or_capability);

  TNode<Object> GetPendingException() {
    auto ref = ExternalReference::Create(kPendingExceptionAddress, isolate());
    return TNode<Object>::UncheckedCast(
        Load(MachineType::AnyTagged(), ExternalConstant(ref)));
  }
  void ClearPendingException() {
    auto ref = ExternalReference::Create(kPendingExceptionAddress, isolate());
    StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
                        TheHoleConstant());
  }

  TNode<Object> GetScheduledException() {
    auto ref = ExternalReference::scheduled_exception_address(isolate());
    return TNode<Object>::UncheckedCast(
        Load(MachineType::AnyTagged(), ExternalConstant(ref)));
  }
  void ClearScheduledException() {
    auto ref = ExternalReference::scheduled_exception_address(isolate());
    StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
                        TheHoleConstant());
  }

  template <typename Descriptor>
  void GenerateAdaptorWithExitFrameType(
      Builtins::ExitFrameType exit_frame_type);
};

template <typename Descriptor>
void InternalBuiltinsAssembler::GenerateAdaptorWithExitFrameType(
    Builtins::ExitFrameType exit_frame_type) {
  TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
  TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
  TNode<WordT> c_function =
      UncheckedCast<WordT>(Parameter(Descriptor::kCFunction));

  // The logic contained here is mirrored for TurboFan inlining in
  // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.

  // Make sure we operate in the context of the called function (for example
  // ConstructStubs implemented in C++ will be run in the context of the caller
  // instead of the callee, due to the way that [[Construct]] is defined for
  // ordinary functions).
  TNode<Context> context =
      CAST(LoadObjectField(target, JSFunction::kContextOffset));

  // Update arguments count for CEntry to contain the number of arguments
  // including the receiver and the extra arguments.
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
  argc = Int32Add(
      argc,
      Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));

  TNode<Code> code = HeapConstant(
      CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                          exit_frame_type == Builtins::BUILTIN_EXIT));

  // Unconditionally push argc, target and new target as extra stack arguments.
  // They will be used by stack frame iterators when constructing stack traces.
  TailCallStub(CEntry1ArgvOnStackDescriptor{},  // descriptor
               code, context,       // standard arguments for TailCallStub
               argc, c_function,    // register arguments
               TheHoleConstant(),   // additional stack argument 1 (padding)
               SmiFromInt32(argc),  // additional stack argument 2
               target,              // additional stack argument 3
               new_target);         // additional stack argument 4
}

TF_BUILTIN(AdaptorWithExitFrame, InternalBuiltinsAssembler) {
  GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::EXIT);
}

TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) {
  GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::BUILTIN_EXIT);
}

TNode<MicrotaskQueue> InternalBuiltinsAssembler::GetDefaultMicrotaskQueue() {
  return TNode<MicrotaskQueue>::UncheckedCast(
      LoadRoot(RootIndex::kDefaultMicrotaskQueue));
}

TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount(
    TNode<MicrotaskQueue> microtask_queue) {
  TNode<IntPtrT> result = LoadAndUntagObjectField(
      microtask_queue, MicrotaskQueue::kPendingMicrotaskCountOffset);
  return result;
}

void InternalBuiltinsAssembler::SetPendingMicrotaskCount(
    TNode<MicrotaskQueue> microtask_queue, TNode<IntPtrT> new_num_tasks) {
  StoreObjectField(microtask_queue,
                   MicrotaskQueue::kPendingMicrotaskCountOffset,
                   SmiFromIntPtr(new_num_tasks));
}

TNode<FixedArray> InternalBuiltinsAssembler::GetQueuedMicrotasks(
    TNode<MicrotaskQueue> microtask_queue) {
  return LoadObjectField<FixedArray>(microtask_queue,
                                     MicrotaskQueue::kQueueOffset);
}

void InternalBuiltinsAssembler::SetQueuedMicrotasks(
    TNode<MicrotaskQueue> microtask_queue, TNode<FixedArray> new_queue) {
  StoreObjectField(microtask_queue, MicrotaskQueue::kQueueOffset, new_queue);
}

TNode<Context> InternalBuiltinsAssembler::GetCurrentContext() {
  auto ref = ExternalReference::Create(kContextAddress, isolate());
  return TNode<Context>::UncheckedCast(
      Load(MachineType::AnyTagged(), ExternalConstant(ref)));
}

void InternalBuiltinsAssembler::SetCurrentContext(TNode<Context> context) {
  auto ref = ExternalReference::Create(kContextAddress, isolate());
  StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref),
                      context);
}
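
// Note: Enter/LeaveMicrotaskContext mirror HandleScopeImplementer's C++
// bookkeeping from generated code by writing its fields directly at fixed
// offsets.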
void InternalBuiltinsAssembler::EnterMicrotaskContext(
    TNode<Context> microtask_context) {
  auto ref = ExternalReference::handle_scope_implementer_address(isolate());
  Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
  StoreNoWriteBarrier(
      MachineType::PointerRepresentation(), hsi,
      IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
      BitcastTaggedToWord(microtask_context));

  // Load mirrored std::vector length from
  // HandleScopeImplementer::entered_contexts_count_
  auto type = kSizetSize == 8 ? MachineType::Uint64() : MachineType::Uint32();
  Node* entered_contexts_length = Load(
      type, hsi,
      IntPtrConstant(HandleScopeImplementerOffsets::kEnteredContextsCount));

  auto rep = kSizetSize == 8 ? MachineRepresentation::kWord64
                             : MachineRepresentation::kWord32;

  StoreNoWriteBarrier(
      rep, hsi,
      IntPtrConstant(
          HandleScopeImplementerOffsets::kEnteredContextCountDuringMicrotasks),
      entered_contexts_length);
}

void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
  auto ref = ExternalReference::handle_scope_implementer_address(isolate());

  Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref));
  StoreNoWriteBarrier(
      MachineType::PointerRepresentation(), hsi,
      IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext),
      IntPtrConstant(0));
  if (kSizetSize == 4) {
    StoreNoWriteBarrier(
        MachineRepresentation::kWord32, hsi,
        IntPtrConstant(HandleScopeImplementerOffsets::
                           kEnteredContextCountDuringMicrotasks),
        Int32Constant(0));
  } else {
    StoreNoWriteBarrier(
        MachineRepresentation::kWord64, hsi,
        IntPtrConstant(HandleScopeImplementerOffsets::
                           kEnteredContextCountDuringMicrotasks),
        Int64Constant(0));
  }
}

void InternalBuiltinsAssembler::RunPromiseHook(
    Runtime::FunctionId id, TNode<Context> context,
    SloppyTNode<HeapObject> promise_or_capability) {
  Label hook(this, Label::kDeferred), done_hook(this);
  Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
         &done_hook);
  BIND(&hook);
  {
    // Get to the underlying JSPromise instance.
    TNode<HeapObject> promise = Select<HeapObject>(
        IsPromiseCapability(promise_or_capability),
        [=] {
          return CAST(LoadObjectField(promise_or_capability,
                                      PromiseCapability::kPromiseOffset));
        },
        [=] { return promise_or_capability; });
    GotoIf(IsUndefined(promise), &done_hook);
    CallRuntime(id, context, promise);
    Goto(&done_hook);
  }
  BIND(&done_hook);
}
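
// EnqueueMicrotask appends to the default microtask queue; when the backing
// FixedArray is full it grows by doubling (minimum capacity 8), so appends
// are amortized O(1).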
TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
  Node* microtask = Parameter(Descriptor::kMicrotask);

  TNode<MicrotaskQueue> microtask_queue = GetDefaultMicrotaskQueue();
  TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(microtask_queue);
  TNode<IntPtrT> new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1));
  TNode<FixedArray> queue = GetQueuedMicrotasks(microtask_queue);
  TNode<IntPtrT> queue_length = LoadAndUntagFixedArrayBaseLength(queue);

  Label if_append(this), if_grow(this), done(this);
  Branch(WordEqual(num_tasks, queue_length), &if_grow, &if_append);

  BIND(&if_grow);
  {
    // Determine the new queue length and check if we need to allocate
    // in large object space (instead of just going to new space, where
    // we also know that we don't need any write barriers for setting
    // up the new queue object).
    Label if_newspace(this), if_lospace(this, Label::kDeferred);
    TNode<IntPtrT> new_queue_length =
        IntPtrMax(IntPtrConstant(8), IntPtrAdd(num_tasks, num_tasks));
    Branch(IntPtrLessThanOrEqual(new_queue_length,
                                 IntPtrConstant(FixedArray::kMaxRegularLength)),
           &if_newspace, &if_lospace);

    BIND(&if_newspace);
    {
      // This is the likely case where the new queue fits into new space,
      // and thus we don't need any write barriers for initializing it.
      TNode<FixedArray> new_queue =
          CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length));
      CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks,
                             SKIP_WRITE_BARRIER);
      StoreFixedArrayElement(new_queue, num_tasks, microtask,
                             SKIP_WRITE_BARRIER);
      FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
                              new_queue_length, RootIndex::kUndefinedValue);
      SetQueuedMicrotasks(microtask_queue, new_queue);
      Goto(&done);
    }

    BIND(&if_lospace);
    {
      // The fallback case where the new queue ends up in large object space.
      TNode<FixedArray> new_queue = CAST(AllocateFixedArray(
          PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
          AllocationFlag::kAllowLargeObjectAllocation));
      CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
      StoreFixedArrayElement(new_queue, num_tasks, microtask);
      FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
                              new_queue_length, RootIndex::kUndefinedValue);
      SetQueuedMicrotasks(microtask_queue, new_queue);
      Goto(&done);
    }
  }

  BIND(&if_append);
  {
    StoreFixedArrayElement(queue, num_tasks, microtask);
    Goto(&done);
  }

  BIND(&done);
  SetPendingMicrotaskCount(microtask_queue, new_num_tasks);
  Return(UndefinedConstant());
}
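
// RunMicrotasks detaches the entire pending queue, runs each task in its own
// context, and then re-checks the queue: tasks enqueued while draining land
// in a fresh backing store and are picked up by the outer loop, so the queue
// is always run to completion.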
TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
  // Load the current context from the isolate.
  TNode<Context> current_context = GetCurrentContext();
  TNode<MicrotaskQueue> microtask_queue = GetDefaultMicrotaskQueue();

  Label init_queue_loop(this), done_init_queue_loop(this);
  Goto(&init_queue_loop);
  BIND(&init_queue_loop);
  {
    TVARIABLE(IntPtrT, index, IntPtrConstant(0));
    Label loop(this, &index), loop_next(this);

    TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(microtask_queue);
    GotoIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), &done_init_queue_loop);

    TNode<FixedArray> queue = GetQueuedMicrotasks(microtask_queue);

    CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
                         LoadAndUntagFixedArrayBaseLength(queue), num_tasks));
    CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));

    SetQueuedMicrotasks(microtask_queue, EmptyFixedArrayConstant());
    SetPendingMicrotaskCount(microtask_queue, IntPtrConstant(0));

    Goto(&loop);
    BIND(&loop);
    {
      TNode<HeapObject> microtask =
          CAST(LoadFixedArrayElement(queue, index.value()));
      index = IntPtrAdd(index.value(), IntPtrConstant(1));

      CSA_ASSERT(this, TaggedIsNotSmi(microtask));

      StoreRoot(RootIndex::kCurrentMicrotask, microtask);
      TNode<Map> microtask_map = LoadMap(microtask);
      TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);

      VARIABLE(var_exception, MachineRepresentation::kTagged,
               TheHoleConstant());
      Label if_exception(this, Label::kDeferred);
      Label is_callable(this), is_callback(this),
          is_promise_fulfill_reaction_job(this),
          is_promise_reject_reaction_job(this),
          is_promise_resolve_thenable_job(this),
          is_unreachable(this, Label::kDeferred);

      int32_t case_values[] = {CALLABLE_TASK_TYPE, CALLBACK_TASK_TYPE,
                               PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
                               PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
                               PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE};
      Label* case_labels[] = {
          &is_callable, &is_callback, &is_promise_fulfill_reaction_job,
          &is_promise_reject_reaction_job, &is_promise_resolve_thenable_job};
      static_assert(arraysize(case_values) == arraysize(case_labels), "");
      Switch(microtask_type, &is_unreachable, case_values, case_labels,
             arraysize(case_labels));

      BIND(&is_callable);
      {
        // Enter the context of the {microtask}.
        TNode<Context> microtask_context =
            LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
        TNode<Context> native_context = LoadNativeContext(microtask_context);

        CSA_ASSERT(this, IsNativeContext(native_context));
        EnterMicrotaskContext(microtask_context);
        SetCurrentContext(native_context);

        TNode<JSReceiver> callable = LoadObjectField<JSReceiver>(
            microtask, CallableTask::kCallableOffset);
        Node* const result = CallJS(
            CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
            microtask_context, callable, UndefinedConstant());
        GotoIfException(result, &if_exception, &var_exception);
        LeaveMicrotaskContext();
        SetCurrentContext(current_context);
        Goto(&loop_next);
      }

      BIND(&is_callback);
      {
        Node* const microtask_callback =
            LoadObjectField(microtask, CallbackTask::kCallbackOffset);
        Node* const microtask_data =
            LoadObjectField(microtask, CallbackTask::kDataOffset);

        // If this turns out to become a bottleneck because of the calls
        // to C++ via CEntry, we can choose to speed them up using a
        // mechanism similar to the one we use for the CallApiFunction stub,
        // except that calling the MicrotaskCallback is even easier, since
        // it doesn't accept any tagged parameters, doesn't return a value
        // and ignores exceptions.
        //
        // But from our current measurements it doesn't seem to be a
        // serious performance problem, even if the microtask is full
        // of CallHandlerTasks (which is not a realistic use case anyway).
        Node* const result =
            CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
                        microtask_callback, microtask_data);
        GotoIfException(result, &if_exception, &var_exception);
        Goto(&loop_next);
      }

      BIND(&is_promise_resolve_thenable_job);
      {
        // Enter the context of the {microtask}.
        TNode<Context> microtask_context = LoadObjectField<Context>(
            microtask, PromiseResolveThenableJobTask::kContextOffset);
        TNode<Context> native_context = LoadNativeContext(microtask_context);
        CSA_ASSERT(this, IsNativeContext(native_context));
        EnterMicrotaskContext(microtask_context);
        SetCurrentContext(native_context);

        Node* const promise_to_resolve = LoadObjectField(
            microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
        Node* const then = LoadObjectField(
            microtask, PromiseResolveThenableJobTask::kThenOffset);
        Node* const thenable = LoadObjectField(
            microtask, PromiseResolveThenableJobTask::kThenableOffset);

        Node* const result =
|
|
|
CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
|
2018-02-08 16:36:52 +00:00
|
|
|
promise_to_resolve, thenable, then);
|
|
|
|
GotoIfException(result, &if_exception, &var_exception);
|
2018-02-06 09:47:20 +00:00
|
|
|
LeaveMicrotaskContext();
|
|
|
|
SetCurrentContext(current_context);
|
2018-02-08 16:36:52 +00:00
|
|
|
Goto(&loop_next);
|
2017-11-30 15:27:59 +00:00
|
|
|
}
|
|
|
|
|
2018-02-08 16:36:52 +00:00
|
|
|
BIND(&is_promise_fulfill_reaction_job);
|
2017-11-30 15:27:59 +00:00
|
|
|
{
|
2018-02-08 16:36:52 +00:00
|
|
|
// Enter the context of the {microtask}.
|
2018-02-26 16:51:23 +00:00
|
|
|
TNode<Context> microtask_context = LoadObjectField<Context>(
|
|
|
|
microtask, PromiseReactionJobTask::kContextOffset);
|
2018-02-27 09:36:55 +00:00
|
|
|
TNode<Context> native_context = LoadNativeContext(microtask_context);
|
2018-02-08 16:36:52 +00:00
|
|
|
CSA_ASSERT(this, IsNativeContext(native_context));
|
2018-02-07 17:59:00 +00:00
|
|
|
EnterMicrotaskContext(microtask_context);
|
2018-02-08 16:36:52 +00:00
|
|
|
SetCurrentContext(native_context);
|
2017-11-30 15:27:59 +00:00
|
|
|
|
2018-02-08 16:36:52 +00:00
|
|
|
Node* const argument =
|
|
|
|
LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
|
|
|
|
Node* const handler =
|
|
|
|
LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
|
2018-05-14 08:55:07 +00:00
|
|
|
Node* const promise_or_capability = LoadObjectField(
|
|
|
|
microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);
|
2017-11-30 15:27:59 +00:00
|
|
|
|
2018-02-08 16:36:52 +00:00
|
|
|
// Run the promise before/debug hook if enabled.
|
2018-05-14 08:55:07 +00:00
|
|
|
RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
|
|
|
|
promise_or_capability);
|
2018-02-08 16:36:52 +00:00
|
|
|
|
|
|
|
Node* const result =
|
|
|
|
CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
|
2018-05-14 08:55:07 +00:00
|
|
|
argument, handler, promise_or_capability);
|
2018-02-08 16:36:52 +00:00
|
|
|
GotoIfException(result, &if_exception, &var_exception);
|
|
|
|
|
|
|
|
// Run the promise after/debug hook if enabled.
|
2018-05-14 08:55:07 +00:00
|
|
|
RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
|
|
|
|
promise_or_capability);
|
2018-01-31 09:10:00 +00:00
|
|
|
|
2018-02-06 09:47:20 +00:00
|
|
|
LeaveMicrotaskContext();
|
|
|
|
SetCurrentContext(current_context);
|
2018-02-08 16:36:52 +00:00
|
|
|
Goto(&loop_next);
|
2017-11-30 15:27:59 +00:00
|
|
|
}

      BIND(&is_promise_reject_reaction_job);
      {
        // Enter the context of the {microtask}.
        TNode<Context> microtask_context = LoadObjectField<Context>(
            microtask, PromiseReactionJobTask::kContextOffset);
        TNode<Context> native_context = LoadNativeContext(microtask_context);
        CSA_ASSERT(this, IsNativeContext(native_context));
        EnterMicrotaskContext(microtask_context);
        SetCurrentContext(native_context);

        Node* const argument =
            LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
        Node* const handler =
            LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
        Node* const promise_or_capability = LoadObjectField(
            microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset);

        // Run the promise before/debug hook if enabled.
        RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
                       promise_or_capability);

        Node* const result =
            CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
                        argument, handler, promise_or_capability);
        GotoIfException(result, &if_exception, &var_exception);

        // Run the promise after/debug hook if enabled.
        RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
                       promise_or_capability);

        LeaveMicrotaskContext();
        SetCurrentContext(current_context);
        Goto(&loop_next);
      }
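
      // Note: the fulfill and reject cases above are structurally identical;
      // they differ only in the builtin dispatched (kPromiseFulfillReactionJob
      // vs. kPromiseRejectReactionJob).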

      BIND(&is_unreachable);
      Unreachable();

      BIND(&if_exception);
      {
        // Report unhandled exceptions from microtasks.
        CallRuntime(Runtime::kReportMessage, current_context,
                    var_exception.value());
        LeaveMicrotaskContext();
        SetCurrentContext(current_context);
        Goto(&loop_next);
      }

      BIND(&loop_next);
      Branch(IntPtrLessThan(index.value(), num_tasks), &loop, &init_queue_loop);
    }
  }

  BIND(&done_init_queue_loop);
  {
    // Reset the "current microtask" on the isolate.
    StoreRoot(RootIndex::kCurrentMicrotask, UndefinedConstant());
    Return(UndefinedConstant());
  }
}
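
// A rough JS-level sketch of the drain loop above (illustrative only; the
// helper names are hypothetical): each task runs in its own context, and an
// exception is reported but aborts only the current task, not the drain.
//
//   function runMicrotasks(queue) {            // hypothetical
//     for (const task of queue) {
//       try { task(); } catch (e) { reportError(e); }  // stand-in helper
//     }
//   }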

TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) {
  TNode<IntPtrT> requested_size =
      UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));

  TailCallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(),
                  SmiFromIntPtr(requested_size));
}

TF_BUILTIN(AllocateInOldSpace, CodeStubAssembler) {
  TNode<IntPtrT> requested_size =
      UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));

  int flags = AllocateTargetSpace::encode(OLD_SPACE);
  TailCallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
                  SmiFromIntPtr(requested_size), SmiConstant(flags));
}
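
// Note: both allocation builtins above tag the word-sized request with
// SmiFromIntPtr before calling into the runtime, which assumes the requested
// size fits in the Smi range.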

TF_BUILTIN(Abort, CodeStubAssembler) {
  TNode<Smi> message_id = CAST(Parameter(Descriptor::kMessageOrMessageId));
  TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}

TF_BUILTIN(AbortJS, CodeStubAssembler) {
  TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
  TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message);
}
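
// Each Generate_CEntry_* variant below encodes its configuration in the name:
// the number of return values (Return1/Return2), whether FP registers are
// saved across the call (SaveFPRegs/DontSaveFPRegs), where argv is passed
// (ArgvOnStack/ArgvInRegister), and whether a builtin exit frame is used
// (BuiltinExit/NoBuiltinExit). All of them delegate to Generate_CEntry.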
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, true);
}

void Builtins::
    Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
        MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvInRegister, false);
}

void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, true);
}

void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, true);
}

void Builtins::
    Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
        MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvInRegister, false);
}

void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  // CallApiGetterStub only exists as a stub to avoid duplicating code between
  // here and code-stubs-<arch>.cc. For example, see CallApiFunctionAndReturn.
  // Here we abuse the instantiated stub to generate code.
  CallApiGetterStub stub(masm->isolate());
  stub.Generate(masm);
}

void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
  // The common variants of CallApiCallbackStub (i.e. all that are embedded into
  // the snapshot) are generated as builtins. The rest remain available as code
  // stubs. Here we abuse the instantiated stub to generate code and avoid
  // duplication.
  const int kArgc = 0;
  CallApiCallbackStub stub(masm->isolate(), kArgc);
  stub.Generate(masm);
}

void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) {
  // The common variants of CallApiCallbackStub (i.e. all that are embedded into
  // the snapshot) are generated as builtins. The rest remain available as code
  // stubs. Here we abuse the instantiated stub to generate code and avoid
  // duplication.
  const int kArgc = 1;
  CallApiCallbackStub stub(masm->isolate(), kArgc);
  stub.Generate(masm);
}
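
// Only the zero- and one-argument callback variants are generated as builtins
// here; CallApiCallbackStub takes the argument count as a constructor
// parameter, so each builtin above is a fixed-argc specialization of the same
// stub.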

// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);
  Node* key = Parameter(Descriptor::kKey);
  Node* context = Parameter(Descriptor::kContext);
  Label if_notfound(this), if_proxy(this, Label::kDeferred),
      if_slow(this, Label::kDeferred);

  CodeStubAssembler::LookupInHolder lookup_property_in_holder =
      [=](Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* unique_name, Label* next_holder,
          Label* if_bailout) {
        VARIABLE(var_value, MachineRepresentation::kTagged);
        Label if_found(this);
        TryGetOwnProperty(context, receiver, holder, holder_map,
                          holder_instance_type, unique_name, &if_found,
                          &var_value, next_holder, if_bailout);
        BIND(&if_found);
        Return(var_value.value());
      };

  CodeStubAssembler::LookupInHolder lookup_element_in_holder =
      [=](Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* index, Label* next_holder,
          Label* if_bailout) {
        // Not supported yet.
        Use(next_holder);
        Goto(if_bailout);
      };

  TryPrototypeChainLookup(object, key, lookup_property_in_holder,
                          lookup_element_in_holder, &if_notfound, &if_slow,
                          &if_proxy);

  BIND(&if_notfound);
  Return(UndefinedConstant());

  BIND(&if_slow);
  TailCallRuntime(Runtime::kGetProperty, context, object, key);

  BIND(&if_proxy);
  {
    // Convert the {key} to a Name first.
    Node* name = CallBuiltin(Builtins::kToName, context, key);

    // The {object} is a JSProxy instance, look up the {name} on it, passing
    // {object} both as receiver and holder. If {name} is absent we can safely
    // return undefined from here.
    TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name, object,
                    SmiConstant(OnNonExistent::kReturnUndefined));
  }
}
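
// For illustration (not part of the builtin): in JS terms, the paths above
// roughly correspond to
//
//   ({x: 1}).x;           // found by lookup_property_in_holder
//   ({}).missing;         // if_notfound -> undefined
//   new Proxy({}, {}).x;  // if_proxy -> ProxyGetProperty
//   [1, 2, 3][0];         // element lookup unsupported -> if_slow
//
// with other bailouts also landing in if_slow and the %GetProperty runtime
// call.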

// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  TNode<Object> key = CAST(Parameter(Descriptor::kKey));
  TNode<Object> value = CAST(Parameter(Descriptor::kValue));

  KeyedStoreGenericGenerator::SetProperty(state(), context, receiver, key,
                                          value, LanguageMode::kStrict);
}
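
// Note: the SetProperty builtin above always stores in strict mode
// (LanguageMode::kStrict), so a failed store throws rather than silently
// doing nothing; callers that need sloppy-mode semantics must take a
// different path.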

}  // namespace internal
}  // namespace v8