Removed flag optimize-constructed-arrays.

This eliminates a large amount of hand-written assembly in the platform-specific backends.

BUG=
R=danno@chromium.org, mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/16453002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15328 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
mvstanton@chromium.org 2013-06-25 16:31:07 +00:00
parent 53eb53f4af
commit 081134ecd1
22 changed files with 427 additions and 2518 deletions
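
For context, the bulk of the deletion follows one pattern: code that branched on FLAG_optimize_constructed_arrays now takes the stub path unconditionally, and the hand-written ArrayNativeCode fallback (together with its AllocateEmptyJSArray/AllocateJSArray helpers) goes away. A representative before/after sketch, reconstructed from the Generate_InternalArrayCode hunks below (surrounding code elided):

Before:
  if (FLAG_optimize_constructed_arrays) {
    // Tail call a stub.
    InternalArrayConstructorStub stub(masm->isolate());
    __ TailCallStub(&stub);
  } else {
    ArrayNativeCode(masm, &generic_array_code);
    // Jump to the generic array code if the specialized code cannot handle
    // the construction.
    __ bind(&generic_array_code);
    Handle<Code> array_code =
        masm->isolate()->builtins()->InternalArrayCodeGeneric();
    __ Jump(array_code, RelocInfo::CODE_TARGET);
  }

After:
  // Tail call a stub.
  InternalArrayConstructorStub stub(masm->isolate());
  __ TailCallStub(&stub);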

View File

@@ -104,360 +104,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
static void AllocateEmptyJSArray(MacroAssembler* masm,
Register array_function,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize;
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
__ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
__ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
__ mov(scratch3, Operand::Zero());
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
if (initial_capacity == 0) {
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
return;
}
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// scratch2: start of next object
__ add(scratch1, result, Operand(JSArray::kSize));
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)
// scratch2: start of next object
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
// Fill the FixedArray with the hole value. Inline the code if short.
STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
static const int kLoopUnfoldLimit = 4;
if (initial_capacity <= kLoopUnfoldLimit) {
for (int i = 0; i < initial_capacity; i++) {
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
}
} else {
Label loop, entry;
__ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
__ b(&entry);
__ bind(&loop);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(scratch1, scratch2);
__ b(lt, &loop);
}
}
// Allocate a JSArray with the number of elements stored in a register. The
// register array_function holds the built-in Array function and the register
// array_size holds the size of the array as a smi. The allocated array is put
// into the result register and beginning and end of the FixedArray elements
// storage is put into registers elements_array_storage and elements_array_end
// (see below for when that is not the case). If the parameter fill_with_holes
// is true the allocated elements backing store is filled with the hole values
// otherwise it is left uninitialized. When the backing store is filled the
// register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array_storage,
Register elements_array_end,
Register scratch1,
Register scratch2,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
__ Assert(ne, "array size is unexpectedly 0");
}
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
__ mov(elements_array_end,
Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
__ add(elements_array_end, elements_array_end, Operand::SmiUntag(array_size));
__ Allocate(elements_array_end,
result,
scratch1,
scratch2,
gc_required,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// elements_array_storage: initial map
// array_size: size of array (smi)
__ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
__ str(elements_array_storage,
FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
__ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// array_size: size of array (smi)
__ add(elements_array_storage, result, Operand(JSArray::kSize));
__ str(elements_array_storage,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ sub(elements_array_storage,
elements_array_storage,
Operand(kHeapObjectTag));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
// array_size: smi-tagged size of elements array
__ add(elements_array_end,
elements_array_storage,
Operand::PointerOffsetFromSmiKey(array_size));
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array_storage: elements array element storage
// elements_array_end: start of next object
if (fill_with_hole) {
Label loop, entry;
__ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
__ jmp(&entry);
__ bind(&loop);
__ str(scratch1,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(elements_array_storage, elements_array_end);
__ b(lt, &loop);
}
}
// Create a new array for the built-in Array function. This function allocates
// the JSArray object and the FixedArray elements array and initializes these.
// If the Array cannot be constructed in native code the runtime is called. This
// function assumes the following state:
// r0: argc
// r1: constructor (built-in Array function)
// lr: return address
// sp[0]: last argument
// This function is used for both construct and normal calls of Array. The only
// difference between handling a construct call and a normal call is that for a
// construct call the constructor function in r1 needs to be preserved for
// entering the generic code. In both cases argc in r0 needs to be preserved.
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand::Zero());
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
__ bind(&empty_array);
AllocateEmptyJSArray(masm,
r1,
r2,
r3,
r4,
r5,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Set up return value, remove receiver from stack and return.
__ mov(r0, r2);
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
// Check for one argument. Bail out if argument is not smi or if it is
// negative.
__ bind(&argc_one_or_more);
__ cmp(r0, Operand(1));
__ b(ne, &argc_two_or_more);
__ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
__ tst(r2, r2);
__ b(ne, &not_empty_array);
__ Drop(1); // Adjust stack.
__ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero.
__ b(&empty_array);
__ bind(&not_empty_array);
STATIC_ASSERT(kSmiTag == 0);
__ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
__ b(ne, call_generic_code);
// Handle construction of an empty array of a certain size. Bail out if size
// is too large to actually allocate an elements array.
STATIC_ASSERT(kSmiTag == 0);
__ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
__ b(ge, call_generic_code);
// r0: argc
// r1: constructor
// r2: array_size (smi)
// sp[0]: argument
AllocateJSArray(masm,
r1,
r2,
r3,
r4,
r5,
r6,
r7,
true,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r2, r4);
// Set up return value, remove receiver and argument from stack and return.
__ mov(r0, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Jump(lr);
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
__ SmiTag(r2, r0);
// r0: argc
// r1: constructor
// r2: array_size (smi)
// sp[0]: last argument
AllocateJSArray(masm,
r1,
r2,
r3,
r4,
r5,
r6,
r7,
false,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r2, r6);
// Fill arguments as array elements. Copy from the top of the stack (last
// element) to the array backing store filling it backwards. Note:
// elements_array_end points after the backing store therefore PreIndex is
// used when filling the backing store.
// r0: argc
// r3: JSArray
// r4: elements_array storage start (untagged)
// r5: elements_array_end (untagged)
// sp[0]: last argument
Label loop, entry;
__ mov(r7, sp);
__ jmp(&entry);
__ bind(&loop);
__ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(r2, &has_non_smi_element);
}
__ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
__ bind(&finish);
__ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
// return.
// r0: argc
// r3: JSArray
// sp[0]: receiver
__ add(sp, sp, Operand(kPointerSize));
__ mov(r0, r3);
__ Jump(lr);
__ bind(&has_non_smi_element);
// Double values are handled by the runtime.
__ CheckMap(
r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
__ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(r3, r4);
__ b(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,
&cant_transition_map);
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ RecordWriteField(r3,
HeapObject::kMapOffset,
r2,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
Label loop2;
__ sub(r7, r7, Operand(kPointerSize));
__ bind(&loop2);
__ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
__ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
__ cmp(r4, r5);
__ b(lt, &loop2);
__ b(&finish);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -480,20 +126,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle the
// construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -518,56 +153,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(r2, Operand(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
}
void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r2 : type info cell
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r3);
__ Assert(ne, "Unexpected initial map for Array function");
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
}
Label generic_constructor;
// Run the native code for the Array function called as a constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(r2, Operand(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}

View File

@@ -2996,9 +2996,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
if (FLAG_optimize_constructed_arrays) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -4623,52 +4621,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value());
// Load the cache state into r3.
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(r3, r1);
__ b(eq, &done);
__ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &done);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
__ str(ip, FieldMemOperand(r2, Cell::kValueOffset), ne);
// An uninitialized cache is patched with the function.
__ str(r1, FieldMemOperand(r2, Cell::kValueOffset), eq);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
ASSERT(FLAG_optimize_constructed_arrays);
Label initialize, done, miss, megamorphic, not_array_function;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -4772,11 +4730,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Fast-case: Invoke the function now.
@@ -4851,15 +4805,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
Register jmp_reg = r3;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -7341,52 +7291,39 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
if (FLAG_optimize_constructed_arrays) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, PropertyCell::kValueOffset));
__ JumpIfNotSmi(r3, &no_info);
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);
__ mov(r3, Operand(GetInitialFastElementsKind()));
__ bind(&switch_ready);
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ JumpIfNotSmi(r3, &no_info);
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);
__ mov(r3, Operand(GetInitialFastElementsKind()));
__ bind(&switch_ready);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
Label generic_constructor;
// Run the native code for the Array function called as a constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
UNREACHABLE();
}
}
@@ -7448,45 +7385,31 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Assert(eq, "Unexpected initial map for Array function");
}
if (FLAG_optimize_constructed_arrays) {
// Figure out the right elements kind
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Figure out the right elements kind
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
if (FLAG_debug_code) {
Label done;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
__ Assert(eq,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
if (FLAG_debug_code) {
Label done;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ b(eq, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
__ b(eq, &done);
__ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
__ Assert(eq,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ b(eq, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
}

View File

@@ -4128,12 +4128,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
if (FLAG_optimize_constructed_arrays) {
// No cell in r2 for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
isolate());
__ mov(r2, Operand(undefined_value));
}
// No cell in r2 for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->factory()->undefined_value());
__ mov(r2, Operand(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4142,7 +4139,6 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
ASSERT(FLAG_optimize_constructed_arrays);
__ mov(r0, Operand(instr->arity()));
__ mov(r2, Operand(instr->hydrogen()->property_cell()));

View File

@@ -886,16 +886,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// overwritten by JS code.
native_context()->set_array_function(*array_function);
if (FLAG_optimize_constructed_arrays) {
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
ArrayConstructorStub array_constructor_stub(isolate);
Handle<Code> code = array_constructor_stub.GetCode(isolate);
array_function->shared()->set_construct_stub(*code);
} else {
array_function->shared()->set_construct_stub(
isolate->builtins()->builtin(Builtins::kCommonArrayConstructCode));
}
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
ArrayConstructorStub array_constructor_stub(isolate);
Handle<Code> code = array_constructor_stub.GetCode(isolate);
array_function->shared()->set_construct_stub(*code);
}
{ // --- N u m b e r ---
@@ -1623,15 +1618,9 @@ Handle<JSFunction> Genesis::InstallInternalArray(
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
if (FLAG_optimize_constructed_arrays) {
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
array_function->shared()->set_construct_stub(*code);
} else {
array_function->shared()->set_construct_stub(
isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
}
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
array_function->shared()->set_construct_stub(*code);
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());

View File

@@ -209,23 +209,21 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
MaybeObject* maybe_array = array->Initialize(0);
if (maybe_array->IsFailure()) return maybe_array;
if (FLAG_optimize_constructed_arrays) {
AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
ElementsKind to_kind = array->GetElementsKind();
if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
to_kind)) {
// We have advice that we should change the elements kind
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
reinterpret_cast<void*>(array),
ElementsKindToString(array->GetElementsKind()),
ElementsKindToString(to_kind));
}
maybe_array = array->TransitionElementsKind(to_kind);
if (maybe_array->IsFailure()) return maybe_array;
AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
ElementsKind to_kind = array->GetElementsKind();
if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
to_kind)) {
// We have advice that we should change the elements kind
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
reinterpret_cast<void*>(array),
ElementsKindToString(array->GetElementsKind()),
ElementsKindToString(to_kind));
}
maybe_array = array->TransitionElementsKind(to_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
}

View File

@@ -208,8 +208,6 @@ enum BuiltinExtraArguments {
V(InternalArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(CommonArrayConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
@@ -399,7 +397,6 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
static void Generate_CommonArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);

View File

@@ -264,8 +264,6 @@ DEFINE_bool(unreachable_code_elimination, false,
"eliminate unreachable code (hidden behind soft deopts)")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
DEFINE_bool(optimize_constructed_arrays, true,
"Use allocation site info on constructed arrays")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")

View File

@@ -8879,17 +8879,13 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
Handle<JSFunction> array_function =
Handle<JSFunction>(isolate()->global_context()->array_function(),
isolate());
bool use_call_new_array = FLAG_optimize_constructed_arrays &&
expr->target().is_identical_to(array_function);
Handle<JSFunction> array_function(
isolate()->global_context()->array_function(), isolate());
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
if (use_call_new_array) {
if (expr->target().is_identical_to(array_function)) {
Handle<Cell> cell = expr->allocation_info_cell();
AddInstruction(new(zone()) HCheckFunction(constructor, array_function));
call = new(zone()) HCallNewArray(context, constructor, argument_count,

View File

@@ -1015,427 +1015,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
// Allocate an empty JSArray. The allocated array is put into the result
// register. If the parameter initial_capacity is larger than zero an elements
// backing store is allocated with this size and filled with the hole values.
// Otherwise the elements backing store is set to the empty FixedArray.
static void AllocateEmptyJSArray(MacroAssembler* masm,
Register array_function,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize;
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
__ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
__ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(result, JSArray::kPropertiesOffset),
factory->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
// If no storage is requested for the elements array just set the empty
// fixed array.
if (initial_capacity == 0) {
__ mov(FieldOperand(result, JSArray::kElementsOffset),
factory->empty_fixed_array());
return;
}
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// scratch2: start of next object
__ lea(scratch1, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
factory->fixed_array_map());
__ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(initial_capacity)));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
__ mov(scratch3, factory->the_hole_value());
for (int i = 0; i < initial_capacity; i++) {
__ mov(FieldOperand(scratch1,
FixedArray::kHeaderSize + i * kPointerSize),
scratch3);
}
} else {
Label loop, entry;
__ mov(scratch2, Immediate(initial_capacity));
__ jmp(&entry);
__ bind(&loop);
__ mov(FieldOperand(scratch1,
scratch2,
times_pointer_size,
FixedArray::kHeaderSize),
factory->the_hole_value());
__ bind(&entry);
__ dec(scratch2);
__ j(not_sign, &loop);
}
}
// Allocate a JSArray with the number of elements stored in a register. The
// register array_function holds the built-in Array function and the register
// array_size holds the size of the array as a smi. The allocated array is put
// into the result register and beginning and end of the FixedArray elements
// storage is put into registers elements_array and elements_array_end (see
// below for when that is not the case). If the parameter fill_with_holes is
// true the allocated elements backing store is filled with the hole values
// otherwise it is left uninitialized. When the backing store is filled the
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
ASSERT(scratch.is(edi)); // rep stos destination
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
__ LoadInitialArrayMap(array_function, scratch,
elements_array, fill_with_hole);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
times_pointer_size,
array_size,
REGISTER_VALUE_IS_SMI,
result,
elements_array_end,
scratch,
gc_required,
TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
Factory* factory = masm->isolate()->factory();
__ mov(elements_array, factory->empty_fixed_array());
__ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
// array_size: size of array (smi)
__ lea(elements_array, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
// Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
factory->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
if (fill_with_hole) {
__ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(eax, factory->the_hole_value());
__ cld();
// Do not use rep stos when filling less than kRepStosThreshold
// words.
const int kRepStosThreshold = 16;
Label loop, entry, done;
__ cmp(ecx, kRepStosThreshold);
__ j(below, &loop); // Note: ecx > 0.
__ rep_stos();
__ jmp(&done);
__ bind(&loop);
__ stos();
__ bind(&entry);
__ cmp(edi, elements_array_end);
__ j(below, &loop);
__ bind(&done);
}
}
// Create a new array for the built-in Array function. This function allocates
// the JSArray object and the FixedArray elements array and initializes these.
// If the Array cannot be constructed in native code the runtime is called. This
// function assumes the following state:
// edi: constructor (built-in Array function)
// eax: argc
// esp[0]: return address
// esp[4]: last argument
// This function is used for both construct and normal calls of Array. Whether
// it is a construct call or not is indicated by the construct_call parameter.
// The only difference between handling a construct call and a normal call is
// that for a construct call the constructor function in edi needs to be
// preserved for entering the generic code. In both cases argc in eax needs to
// be preserved.
void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
empty_array, not_empty_array, finish, cant_transition_map, not_double;
// Push the constructor and argc. No need to tag argc as a smi, as there will
// be no garbage collection with this on the stack.
int push_count = 0;
if (construct_call) {
push_count++;
__ push(edi);
}
push_count++;
__ push(eax);
// Check for array construction with zero arguments.
__ test(eax, eax);
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
edi,
eax,
ebx,
ecx,
edi,
&prepare_generic_code_call);
__ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
}
__ ret(kPointerSize);
// Check for one argument. Bail out if argument is not smi or if it is
// negative.
__ bind(&argc_one_or_more);
__ cmp(eax, 1);
__ j(not_equal, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
__ test(ecx, ecx);
__ j(not_zero, &not_empty_array);
// The single argument passed is zero, so we jump to the code above used to
// handle the case of no arguments passed. To adapt the stack for that we move
// the return address and the pushed constructor (if pushed) one stack slot up
// thereby removing the passed argument. Argc is also on the stack - at the
// bottom - and it needs to be changed from 1 to 0 to have the call into the
// runtime system work in case a GC is required.
for (int i = push_count; i > 0; i--) {
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
__ Drop(2); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
__ bind(&not_empty_array);
__ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
__ j(not_zero, &prepare_generic_code_call);
// Handle construction of an empty array of a certain size. Get the size from
// the stack and bail out if size is too large to actually allocate an elements
// array.
__ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
__ j(greater_equal, &prepare_generic_code_call);
// edx: array_size (smi)
// edi: constructor
// esp[0]: argc (cannot be 0 here)
// esp[4]: constructor (only if construct_call)
// esp[8]: return address
// esp[C]: argument
AllocateJSArray(masm,
edi,
ecx,
ebx,
eax,
edx,
edi,
true,
&prepare_generic_code_call);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->array_function_native(), 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
}
__ ret(2 * kPointerSize);
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(eax); // Convert argc to a smi.
// eax: array_size (smi)
// edi: constructor
// esp[0] : argc
// esp[4]: constructor (only if construct_call)
// esp[8] : return address
// esp[C] : last argument
AllocateJSArray(masm,
edi,
eax,
ebx,
ecx,
edx,
edi,
false,
&prepare_generic_code_call);
__ IncrementCounter(counters->array_function_native(), 1);
__ push(ebx);
__ mov(ebx, Operand(esp, kPointerSize));
// ebx: argc
// edx: elements_array_end (untagged)
// esp[0]: JSArray
// esp[4]: argc
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
// Location of the last argument
int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
__ lea(edi, Operand(esp, last_arg_offset));
// Location of the first array element (Parameter fill_with_holes to
// AllocateJSArray is false, so the FixedArray is returned in ecx).
__ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
Label has_non_smi_element;
// ebx: argc
// edx: location of the first array element
// edi: location of the last argument
// esp[0]: JSArray
// esp[4]: argc
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
Label loop, entry;
__ mov(ecx, ebx);
__ jmp(&entry);
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(eax, &has_non_smi_element);
}
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
// Remove caller arguments from the stack and return.
// ebx: argc
// esp[0]: JSArray
// esp[4]: argc
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
__ bind(&finish);
__ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
__ pop(eax);
__ pop(ebx);
__ lea(esp, Operand(esp, ebx, times_pointer_size,
last_arg_offset - kPointerSize));
__ jmp(ecx);
__ bind(&has_non_smi_element);
// Double values are handled by the runtime.
__ CheckMap(eax,
masm->isolate()->factory()->heap_number_map(),
&not_double,
DONT_DO_SMI_CHECK);
__ bind(&cant_transition_map);
// Throw away the array that's only been partially constructed.
__ pop(eax);
__ UndoAllocationInNewSpace(eax);
__ jmp(&prepare_generic_code_call);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, Operand(esp, 0));
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(
FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
edi,
eax,
&cant_transition_map);
__ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
__ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare to re-enter the loop
__ lea(edi, Operand(esp, last_arg_offset));
// Finish the array initialization loop.
Label loop2;
__ bind(&loop2);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ dec(ecx);
__ j(greater_equal, &loop2);
__ jmp(&finish);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);
__ pop(eax);
if (construct_call) {
__ pop(edi);
}
__ jmp(call_generic_code);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1459,20 +1038,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, false, &generic_array_code);
// Jump to the generic internal array code in case the specialized code
// cannot handle the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
}
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1498,58 +1066,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(ebx, Immediate(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, false, &generic_array_code);
// Jump to the generic internal array code in case the specialized code
// cannot handle the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
}
}
void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected initial map for Array function");
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, "Unexpected initial map for Array function");
}
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, true, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(ebx, Immediate(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}

View File

@@ -4678,51 +4678,12 @@ void InterruptStub::Generate(MacroAssembler* masm) {
}
static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, PropertyCell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
__ j(equal, &done, Label::kNear);
__ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
__ j(equal, &done, Label::kNear);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
__ j(equal, &initialize, Label::kNear);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ mov(FieldOperand(ebx, Cell::kValueOffset),
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
__ jmp(&done, Label::kNear);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
__ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
ASSERT(FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4824,11 +4785,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Fast-case: Just invoke the function.
@@ -4901,15 +4858,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
Register jmp_reg = ecx;
__ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -4955,9 +4908,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
if (FLAG_optimize_constructed_arrays) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -7940,52 +7891,39 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
if (FLAG_optimize_constructed_arrays) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
__ JumpIfNotSmi(edx, &no_info);
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);
__ mov(edx, Immediate(GetInitialFastElementsKind()));
__ bind(&switch_ready);
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
__ JumpIfNotSmi(edx, &no_info);
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);
__ mov(edx, Immediate(GetInitialFastElementsKind()));
__ bind(&switch_ready);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ test(eax, eax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ test(eax, eax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ cmp(eax, 1);
__ j(greater, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_zero_case);
__ cmp(eax, 1);
__ j(greater, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, true, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
UNREACHABLE();
}
}
@@ -8048,46 +7986,33 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Assert(equal, "Unexpected initial map for Array function");
}
if (FLAG_optimize_constructed_arrays) {
// Figure out the right elements kind
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Figure out the right elements kind
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(ecx, Map::kElementsKindMask);
__ shr(ecx, Map::kElementsKindShift);
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(ecx, Map::kElementsKindMask);
__ shr(ecx, Map::kElementsKindShift);
if (FLAG_debug_code) {
Label done;
__ cmp(ecx, Immediate(FAST_ELEMENTS));
__ j(equal, &done);
__ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
if (FLAG_debug_code) {
Label done;
__ cmp(ecx, Immediate(FAST_ELEMENTS));
__ j(equal, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, true, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
__ j(equal, &done);
__ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
__ cmp(ecx, Immediate(FAST_ELEMENTS));
__ j(equal, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
}

View File

@@ -4169,12 +4169,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
if (FLAG_optimize_constructed_arrays) {
// No cell in ebx for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
isolate());
__ mov(ebx, Immediate(undefined_value));
}
// No cell in ebx for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->factory()->undefined_value());
__ mov(ebx, Immediate(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4185,7 +4182,6 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(FLAG_optimize_constructed_arrays);
__ Set(eax, Immediate(instr->arity()));
__ mov(ebx, instr->hydrogen()->property_cell());

View File

@@ -1375,7 +1375,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
ASSERT(FLAG_optimize_constructed_arrays);
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();

View File

@@ -108,372 +108,6 @@ static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
}
// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
static void AllocateEmptyJSArray(MacroAssembler* masm,
Register array_function,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize;
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
__ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
__ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
__ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
__ mov(scratch3, zero_reg);
__ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
if (initial_capacity == 0) {
__ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
return;
}
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// scratch2: start of next object
__ Addu(scratch1, result, Operand(JSArray::kSize));
__ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)
// scratch2: start of next object
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
__ sw(scratch3, MemOperand(scratch1));
__ Addu(scratch1, scratch1, kPointerSize);
__ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
__ sw(scratch3, MemOperand(scratch1));
__ Addu(scratch1, scratch1, kPointerSize);
// Fill the FixedArray with the hole value. Inline the code if short.
STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
static const int kLoopUnfoldLimit = 4;
if (initial_capacity <= kLoopUnfoldLimit) {
for (int i = 0; i < initial_capacity; i++) {
__ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
}
} else {
Label loop, entry;
__ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
__ Branch(&entry);
__ bind(&loop);
__ sw(scratch3, MemOperand(scratch1));
__ Addu(scratch1, scratch1, kPointerSize);
__ bind(&entry);
__ Branch(&loop, lt, scratch1, Operand(scratch2));
}
}
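The STATIC_ASSERTs in this removed helper pin down the FixedArray header layout that the hole-filling loop relies on: the map word at offset 0, the smi-encoded length one word later, and the element slots starting at kHeaderSize. A rough C++ picture of that layout, with pointer tagging and smi encoding left out:

#include <cstdint>

// Simplified sketch only; real FixedArrays are tagged heap objects.
struct FixedArrayLayout {
  uintptr_t map;         // 0 * kPointerSize (FixedArray::kMapOffset)
  uintptr_t smi_length;  // 1 * kPointerSize, capacity stored as a smi
  uintptr_t slots[1];    // 2 * kPointerSize (kHeaderSize), filled with holes
};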
// Allocate a JSArray with the number of elements stored in a register. The
// register array_function holds the built-in Array function and the register
// array_size holds the size of the array as a smi. The allocated array is put
// into the result register and beginning and end of the FixedArray elements
// storage is put into registers elements_array_storage and elements_array_end
// (see below for when that is not the case). If the parameter fill_with_holes
// is true the allocated elements backing store is filled with the hole values
// otherwise it is left uninitialized. When the backing store is filled the
// register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array_storage,
Register elements_array_end,
Register scratch1,
Register scratch2,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ Assert(
ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
}
// Allocate the JSArray object together with space for a FixedArray with the
// requested number of elements.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ li(elements_array_end,
(JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
__ sra(scratch1, array_size, kSmiTagSize);
__ Addu(elements_array_end, elements_array_end, scratch1);
__ Allocate(elements_array_end,
result,
scratch1,
scratch2,
gc_required,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// elements_array_storage: initial map
// array_size: size of array (smi)
__ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
__ sw(elements_array_storage,
FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
__ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// array_size: size of array (smi)
__ Addu(elements_array_storage, result, Operand(JSArray::kSize));
__ sw(elements_array_storage,
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
__ And(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ sw(scratch1, MemOperand(elements_array_storage));
__ Addu(elements_array_storage, elements_array_storage, kPointerSize);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
// JSArrays. The length of a FixedArray is stored as a smi.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ sw(array_size, MemOperand(elements_array_storage));
__ Addu(elements_array_storage, elements_array_storage, kPointerSize);
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
// array_size: smi-tagged size of elements array
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
__ Addu(elements_array_end, elements_array_storage, elements_array_end);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array_storage: elements array element storage
// elements_array_end: start of next object
if (fill_with_hole) {
Label loop, entry;
__ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
__ Branch(&entry);
__ bind(&loop);
__ sw(scratch1, MemOperand(elements_array_storage));
__ Addu(elements_array_storage, elements_array_storage, kPointerSize);
__ bind(&entry);
__ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
}
}
// Create a new array for the built-in Array function. This function allocates
// the JSArray object and the FixedArray elements array and initializes these.
// If the Array cannot be constructed in native code the runtime is called. This
// function assumes the following state:
// a0: argc
// a1: constructor (built-in Array function)
// ra: return address
// sp[0]: last argument
// This function is used for both construct and normal calls of Array. The only
// difference between handling a construct call and a normal call is that for a
// construct call the constructor function in a1 needs to be preserved for
// entering the generic code. In both cases argc in a0 needs to be preserved.
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
// Handle construction of an empty array.
__ bind(&empty_array);
AllocateEmptyJSArray(masm,
a1,
a2,
a3,
t0,
t1,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a3, t0);
// Set up return value, remove receiver from stack and return.
__ Addu(sp, sp, Operand(kPointerSize));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
// Check for one argument. Bail out if argument is not smi or if it is
// negative.
__ bind(&argc_one_or_more);
__ Branch(&argc_two_or_more, ne, a0, Operand(1));
STATIC_ASSERT(kSmiTag == 0);
__ lw(a2, MemOperand(sp)); // Get the argument from the stack.
__ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
__ Drop(1); // Adjust stack.
__ mov(a0, zero_reg); // Treat this as a call with argc of zero.
__ Branch(&empty_array);
__ bind(&not_empty_array);
__ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
__ Branch(call_generic_code, eq, a3, Operand(zero_reg));
// Handle construction of an empty array of a certain size. Bail out if size
// is too large to actually allocate an elements array.
STATIC_ASSERT(kSmiTag == 0);
__ Branch(call_generic_code, Ugreater_equal, a2,
Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
// a0: argc
// a1: constructor
// a2: array_size (smi)
// sp[0]: argument
AllocateJSArray(masm,
a1,
a2,
a3,
t0,
t1,
t2,
t3,
true,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a2, t0);
// Set up return value, remove receiver and argument from stack and return.
__ Addu(sp, sp, Operand(2 * kPointerSize));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a3);
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
__ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
// a0: argc
// a1: constructor
// a2: array_size (smi)
// sp[0]: last argument
AllocateJSArray(masm,
a1,
a2,
a3,
t0,
t1,
t2,
t3,
false,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a2, t2);
// Fill arguments as array elements. Copy from the top of the stack (last
// element) to the array backing store filling it backwards. Note:
// elements_array_end points after the backing store.
// a0: argc
// a3: JSArray
// t0: elements_array storage start (untagged)
// t1: elements_array_end (untagged)
// sp[0]: last argument
Label loop, entry;
__ Branch(USE_DELAY_SLOT, &entry);
__ mov(t3, sp);
__ bind(&loop);
__ lw(a2, MemOperand(t3));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(a2, &has_non_smi_element);
}
__ Addu(t3, t3, kPointerSize);
__ Addu(t1, t1, -kPointerSize);
__ sw(a2, MemOperand(t1));
__ bind(&entry);
__ Branch(&loop, lt, t0, Operand(t1));
__ bind(&finish);
__ mov(sp, t3);
// Remove caller arguments and receiver from the stack, set up the return value and
// return.
// a0: argc
// a3: JSArray
// sp[0]: receiver
__ Addu(sp, sp, Operand(kPointerSize));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a3);
__ bind(&has_non_smi_element);
// Double values are handled by the runtime.
__ CheckMap(
a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
__ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(a3, t0);
__ Branch(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// a3: JSArray
__ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
a2,
t5,
&cant_transition_map);
__ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
__ RecordWriteField(a3,
HeapObject::kMapOffset,
a2,
t5,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
Label loop2;
__ bind(&loop2);
__ lw(a2, MemOperand(t3));
__ Addu(t3, t3, kPointerSize);
__ Subu(t1, t1, kPointerSize);
__ sw(a2, MemOperand(t1));
__ Branch(&loop2, lt, t0, Operand(t1));
__ Branch(&finish);
}
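As the comment block before ArrayNativeCode describes, the removed native path sorts calls into three shapes before falling back to the generic code: no arguments, a single smi length, and an explicit argument list. A hypothetical host-side sketch of that classification (the names and the max_fast_length parameter are illustrative, not V8 API):

enum class ArrayBuildPath { kEmpty, kWithLength, kFromArguments, kGeneric };

// max_fast_length stands in for JSObject::kInitialMaxFastElementArray.
ArrayBuildPath ClassifyArrayCall(int argc, bool arg_is_smi, int arg_value,
                                 int max_fast_length) {
  if (argc == 0) return ArrayBuildPath::kEmpty;
  if (argc == 1) {
    if (arg_is_smi && arg_value == 0) return ArrayBuildPath::kEmpty;
    if (!arg_is_smi || arg_value < 0) return ArrayBuildPath::kGeneric;
    if (arg_value >= max_fast_length) return ArrayBuildPath::kGeneric;
    return ArrayBuildPath::kWithLength;   // hole-filled backing store
  }
  // Two or more arguments: copy them into the backing store; the copy loop
  // can still bail out to the runtime on non-smi or double elements.
  return ArrayBuildPath::kFromArguments;
}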
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -498,20 +132,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
if (FLAG_optimize_constructed_arrays) {
// Tail call a stub.
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle the
// construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
// Tail call a stub.
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -538,58 +161,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
if (FLAG_optimize_constructed_arrays) {
// Tail call a stub.
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ li(a2, Operand(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
}
void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : type info cell
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function (3)",
t0, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
__ Assert(eq, "Unexpected initial map for Array function (4)",
t0, Operand(MAP_TYPE));
}
Label generic_constructor;
// Run the native code for the Array function called as a constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
// Tail call a stub.
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ li(a2, Operand(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}

View File

@@ -3339,9 +3339,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
if (FLAG_optimize_constructed_arrays) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -5018,55 +5016,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a1 : the function to call
// a2 : cache cell for call target
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value());
// Load the cache state into a3.
__ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ Branch(&done, eq, a3, Operand(a1));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&done, eq, a3, Operand(at));
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
// An uninitialized cache is patched with the function.
// Store a1 in the delay slot. This may or may not get overwritten depending
// on the result of the comparison.
__ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
// No need for a write barrier here - cells are rescanned.
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
__ bind(&done);
}
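The removed helper above implements a small state machine over the type-feedback cell: uninitialized (the hole sentinel), monomorphic (a concrete JSFunction), and megamorphic (the undefined sentinel). A plain C++ sketch of those transitions, with the sentinels passed in rather than loaded from the root list:

struct FeedbackCell { const void* value; };

void RecordCallTarget(FeedbackCell* cell, const void* function,
                      const void* uninitialized_sentinel,  // the hole
                      const void* megamorphic_sentinel) {  // undefined
  const void* state = cell->value;
  // Monomorphic hit or already megamorphic: leave the cell unchanged.
  if (state == function || state == megamorphic_sentinel) return;
  if (state == uninitialized_sentinel) {
    cell->value = function;              // first target seen: go monomorphic
  } else {
    cell->value = megamorphic_sentinel;  // a different target: go megamorphic
  }
}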
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a1 : the function to call
// a2 : cache cell for call target
ASSERT(FLAG_optimize_constructed_arrays);
Label initialize, done, miss, megamorphic, not_array_function;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5166,11 +5121,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Fast-case: Invoke the function now.
@@ -5244,15 +5195,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2;
Register jmp_reg = a3;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -7773,50 +7720,37 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
if (FLAG_optimize_constructed_arrays) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
__ lw(a3, FieldMemOperand(a2, PropertyCell::kValueOffset));
__ JumpIfNotSmi(a3, &no_info);
__ SmiUntag(a3);
__ jmp(&switch_ready);
__ bind(&no_info);
__ li(a3, Operand(GetInitialFastElementsKind()));
__ bind(&switch_ready);
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
__ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
__ JumpIfNotSmi(a3, &no_info);
__ SmiUntag(a3);
__ jmp(&switch_ready);
__ bind(&no_info);
__ li(a3, Operand(GetInitialFastElementsKind()));
__ bind(&switch_ready);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ Branch(&not_one_case, gt, a0, Operand(1));
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ Branch(&not_one_case, gt, a0, Operand(1));
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
}
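The constructor stub above first recovers an elements kind, either from the smi stored in the type feedback cell or, on the no_info path, from the initial fast elements kind, and then picks one of three specialized dispatch helpers based on the argument count. A hypothetical outline of that selection logic (the dispatch names are taken from the code above; the functions themselves are illustrative):

enum class StubArgCount { ANY, NONE, ONE, MORE_THAN_ONE };

// Mirrors the no_info path: a missing cell, or a cell that does not hold a
// smi, falls back to GetInitialFastElementsKind().
int ResolveElementsKind(bool has_cell, bool cell_holds_smi, int cell_value,
                        int initial_kind) {
  return (has_cell && cell_holds_smi) ? cell_value : initial_kind;
}

const char* SelectDispatch(StubArgCount count, int argc) {
  if (count == StubArgCount::NONE) return "CreateArrayDispatch<ArrayNoArgumentConstructorStub>";
  if (count == StubArgCount::ONE) return "CreateArrayDispatchOneArgument";
  if (count == StubArgCount::MORE_THAN_ONE) return "CreateArrayDispatch<ArrayNArgumentsConstructorStub>";
  // ANY: decide on the runtime argument count, as the stub does with a0.
  if (argc == 0) return "CreateArrayDispatch<ArrayNoArgumentConstructorStub>";
  if (argc == 1) return "CreateArrayDispatchOneArgument";
  return "CreateArrayDispatch<ArrayNArgumentsConstructorStub>";
}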
@@ -7877,43 +7811,30 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
t0, Operand(MAP_TYPE));
}
if (FLAG_optimize_constructed_arrays) {
// Figure out the right elements kind.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Figure out the right elements kind.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into a3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
// Load the map's "bit field 2" into a3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
if (FLAG_debug_code) {
Label done;
__ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
__ Assert(
eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
a3, Operand(FAST_HOLEY_ELEMENTS));
__ bind(&done);
}
Label fast_elements_case;
__ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
if (FLAG_debug_code) {
Label done;
__ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
__ Assert(
eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
a3, Operand(FAST_HOLEY_ELEMENTS));
__ bind(&done);
}
Label fast_elements_case;
__ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
}

View File

@@ -4048,12 +4048,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
if (FLAG_optimize_constructed_arrays) {
// No cell in a2 for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
isolate());
__ li(a2, Operand(undefined_value));
}
// No cell in a2 for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->factory()->undefined_value());
__ li(a2, Operand(undefined_value));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4062,7 +4059,6 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
ASSERT(FLAG_optimize_constructed_arrays);
__ li(a0, Operand(instr->arity()));
__ li(a2, Operand(instr->hydrogen()->property_cell()));

View File

@@ -1093,366 +1093,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
// Allocate an empty JSArray. The allocated array is put into the result
// register. If the parameter initial_capacity is larger than zero an elements
// backing store is allocated with this size and filled with the hole values.
// Otherwise the elements backing store is set to the empty FixedArray.
static void AllocateEmptyJSArray(MacroAssembler* masm,
Register array_function,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
int size = JSArray::kSize;
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
__ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// scratch1: initial map
// scratch2: start of next object
Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
factory->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
// If no storage is requested for the elements array just set the empty
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
factory->empty_fixed_array());
return;
}
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// scratch2: start of next object
__ lea(scratch1, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
factory->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
for (int i = 0; i < initial_capacity; i++) {
__ movq(FieldOperand(scratch1,
FixedArray::kHeaderSize + i * kPointerSize),
scratch3);
}
} else {
Label loop, entry;
__ movq(scratch2, Immediate(initial_capacity));
__ jmp(&entry);
__ bind(&loop);
__ movq(FieldOperand(scratch1,
scratch2,
times_pointer_size,
FixedArray::kHeaderSize),
scratch3);
__ bind(&entry);
__ decq(scratch2);
__ j(not_sign, &loop);
}
}
// Allocate a JSArray with the number of elements stored in a register. The
// register array_function holds the built-in Array function and the register
// array_size holds the size of the array as a smi. The allocated array is put
// into the result register and beginning and end of the FixedArray elements
// storage is put into registers elements_array and elements_array_end (see
// below for when that is not the case). If the parameter fill_with_holes is
// true the allocated elements backing store is filled with the hole values
// otherwise it is left uninitialized. When the backing store is filled the
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
__ LoadInitialArrayMap(array_function, scratch,
elements_array, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ testq(array_size, array_size);
__ Assert(not_zero, "array size is unexpectedly 0");
}
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
SmiIndex index =
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
index.scale,
index.reg,
result,
elements_array_end,
scratch,
gc_required,
TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
Factory* factory = masm->isolate()->factory();
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ Move(elements_array, factory->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
// array_size: size of array (smi)
__ lea(elements_array, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
// Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
factory->fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
if (fill_with_hole) {
Label loop, entry;
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
__ bind(&loop);
__ movq(Operand(elements_array, 0), scratch);
__ addq(elements_array, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(elements_array, elements_array_end);
__ j(below, &loop);
}
}
// Create a new array for the built-in Array function. This function allocates
// the JSArray object and the FixedArray elements array and initializes these.
// If the Array cannot be constructed in native code the runtime is called. This
// function assumes the following state:
// rdi: constructor (built-in Array function)
// rax: argc
// rsp[0]: return address
// rsp[8]: last argument
// This function is used for both construct and normal calls of Array. The only
// difference between handling a construct call and a normal call is that for a
// construct call the constructor function in rdi needs to be preserved for
// entering the generic code. In both cases argc in rax needs to be preserved.
// Both registers are preserved by this code so no need to differentiate between
// a construct call and a normal call.
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments.
__ testq(rax, rax);
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
rdi,
rbx,
rcx,
rdx,
r8,
call_generic_code);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(kPointerSize);
// Check for one argument. Bail out if argument is not smi or if it is
// negative.
__ bind(&argc_one_or_more);
__ cmpq(rax, Immediate(1));
__ j(not_equal, &argc_two_or_more);
__ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
__ SmiTest(rdx);
__ j(not_zero, &not_empty_array);
__ pop(r8); // Adjust stack.
__ Drop(1);
__ push(r8);
__ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
__ bind(&not_empty_array);
__ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
// Handle construction of an empty array of a certain size. Bail out if size
// is too large to actually allocate an elements array.
__ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
__ j(greater_equal, call_generic_code);
// rax: argc
// rdx: array_size (smi)
// rdi: constructor
// esp[0]: return address
// esp[8]: argument
AllocateJSArray(masm,
rdi,
rdx,
rbx,
rcx,
r8,
r9,
true,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(2 * kPointerSize);
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
__ movq(rdx, rax);
__ Integer32ToSmi(rdx, rdx); // Convert argc to a smi.
// rax: argc
// rdx: array_size (smi)
// rdi: constructor
// esp[0] : return address
// esp[8] : last argument
AllocateJSArray(masm,
rdi,
rdx,
rbx,
rcx,
r8,
r9,
false,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1);
// rax: argc
// rbx: JSArray
// rcx: elements_array
// r8: elements_array_end (untagged)
// esp[0]: return address
// esp[8]: last argument
// Location of the last argument
__ lea(r9, Operand(rsp, kPointerSize));
// Location of the first array element (parameter fill_with_hole to
// AllocateJSArray is false, so the FixedArray is returned in rcx).
__ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
// rax: argc
// rbx: JSArray
// rdx: location of the first array element
// r9: location of the last argument
// esp[0]: return address
// esp[8]: last argument
Label loop, entry;
__ movq(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
__ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(r8, &has_non_smi_element);
}
__ movq(Operand(rdx, 0), r8);
__ addq(rdx, Immediate(kPointerSize));
__ bind(&entry);
__ decq(rcx);
__ j(greater_equal, &loop);
// Remove caller arguments from the stack and return.
// rax: argc
// rbx: JSArray
// esp[0]: return address
// esp[8]: last argument
__ bind(&finish);
__ pop(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
__ movq(rax, rbx);
__ ret(0);
__ bind(&has_non_smi_element);
// Double values are handled by the runtime.
__ CheckMap(r8,
masm->isolate()->factory()->heap_number_map(),
&not_double,
DONT_DO_SMI_CHECK);
__ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(rbx);
__ jmp(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// rbx: JSArray
__ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r11,
kScratchRegister,
&cant_transition_map);
__ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
__ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Finish the array initialization loop.
Label loop2;
__ bind(&loop2);
__ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
__ movq(Operand(rdx, 0), r8);
__ addq(rdx, Immediate(kPointerSize));
__ decq(rcx);
__ j(greater_equal, &loop2);
__ jmp(&finish);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1477,20 +1117,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1517,61 +1146,16 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ Move(rbx, undefined_sentinel);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ Move(rbx, undefined_sentinel);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
__ Check(not_smi, "Unexpected initial map for Array function");
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, "Unexpected initial map for Array function");
}
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments

View File

@@ -3717,51 +3717,12 @@ void InterruptStub::Generate(MacroAssembler* masm) {
}
static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done;
// Load the cache state into rcx.
__ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmpq(rcx, rdi);
__ j(equal, &done, Label::kNear);
__ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
__ j(equal, &done, Label::kNear);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
__ j(equal, &initialize, Label::kNear);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ Move(FieldOperand(rbx, Cell::kValueOffset),
TypeFeedbackCells::MegamorphicSentinel(isolate));
__ jmp(&done, Label::kNear);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
__ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
// No need for a write barrier here - cells are rescanned.
__ bind(&done);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rbx : cache cell for call target
// rdi : the function to call
ASSERT(FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -3860,11 +3821,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Fast-case: Just invoke the function.
@@ -3939,15 +3896,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
if (FLAG_optimize_constructed_arrays) {
GenerateRecordCallTarget(masm);
} else {
GenerateRecordCallTargetNoArray(masm);
}
GenerateRecordCallTarget(masm);
}
// Jump to the function-specific construct stub.
Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
Register jmp_reg = rcx;
__ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movq(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3995,9 +3948,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
if (FLAG_optimize_constructed_arrays) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
@@ -6949,52 +6900,39 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&okay_here);
}
if (FLAG_optimize_constructed_arrays) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
__ JumpIfNotSmi(rdx, &no_info);
__ SmiToInteger32(rdx, rdx);
__ jmp(&switch_ready);
__ bind(&no_info);
__ movq(rdx, Immediate(GetInitialFastElementsKind()));
__ bind(&switch_ready);
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
__ JumpIfNotSmi(rdx, &no_info);
__ SmiToInteger32(rdx, rdx);
__ jmp(&switch_ready);
__ bind(&no_info);
__ movq(rdx, Immediate(GetInitialFastElementsKind()));
__ bind(&switch_ready);
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ testq(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ cmpl(rax, Immediate(1));
__ j(greater, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
}
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
__ testq(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
__ bind(&not_zero_case);
__ cmpl(rax, Immediate(1));
__ j(greater, &not_one_case);
CreateArrayDispatchOneArgument(masm);
__ bind(&not_one_case);
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else if (argument_count_ == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
} else if (argument_count_ == ONE) {
CreateArrayDispatchOneArgument(masm);
} else if (argument_count_ == MORE_THAN_ONE) {
CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
} else {
UNREACHABLE();
}
}
@@ -7058,46 +6996,33 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Check(equal, "Unexpected initial map for Array function");
}
if (FLAG_optimize_constructed_arrays) {
// Figure out the right elements kind
__ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Figure out the right elements kind
__ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
__ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
__ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
Label done;
__ cmpl(rcx, Immediate(FAST_ELEMENTS));
__ j(equal, &done);
__ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
__ cmpl(rcx, Immediate(FAST_ELEMENTS));
__ j(equal, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
}
if (FLAG_debug_code) {
Label done;
__ cmpl(rcx, Immediate(FAST_ELEMENTS));
__ j(equal, &done);
__ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
__ cmpl(rcx, Immediate(FAST_ELEMENTS));
__ j(equal, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
}

View File

@@ -3872,11 +3872,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
if (FLAG_optimize_constructed_arrays) {
// No cell in ebx for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->factory()->undefined_value());
__ Move(rbx, undefined_value);
}
// No cell in ebx for construct type feedback in optimized code
Handle<Object> undefined_value(isolate()->factory()->undefined_value());
__ Move(rbx, undefined_value);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3885,7 +3883,6 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
ASSERT(FLAG_optimize_constructed_arrays);
__ Set(rax, instr->arity());
__ Move(rbx, instr->hydrogen()->property_cell());

View File

@@ -1290,7 +1290,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
ASSERT(FLAG_optimize_constructed_arrays);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(constructor);

View File

@@ -37,7 +37,6 @@
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
optimize_constructed_arrays = true;
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -45,12 +44,6 @@ if (support_smi_only_arrays) {
print("Tests do NOT include smi-only arrays.");
}
if (optimize_constructed_arrays) {
print("Tests include constructed array optimizations.");
} else {
print("Tests do NOT include constructed array optimizations.");
}
var elements_kind = {
fast_smi_only : 'fast smi only elements',
fast : 'fast elements',
@@ -187,136 +180,134 @@ if (support_smi_only_arrays) {
// sites work again for fast literals
//assertKind(elements_kind.fast, obj);
if (optimize_constructed_arrays) {
function newarraycase_smidouble(value) {
var a = new Array();
a[0] = value;
return a;
}
// Case: new Array() as allocation site, smi->double
obj = newarraycase_smidouble(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_smidouble(1.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_smidouble(2);
assertKind(elements_kind.fast_double, obj);
function newarraycase_smiobj(value) {
var a = new Array();
a[0] = value;
return a;
}
// Case: new Array() as allocation site, smi->fast
obj = newarraycase_smiobj(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_smiobj("gloria");
assertKind(elements_kind.fast, obj);
obj = newarraycase_smiobj(2);
assertKind(elements_kind.fast, obj);
function newarraycase_length_smidouble(value) {
var a = new Array(3);
a[0] = value;
return a;
}
// Case: new Array(length) as allocation site
obj = newarraycase_length_smidouble(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_length_smidouble(1.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_length_smidouble(2);
assertKind(elements_kind.fast_double, obj);
// Try to continue the transition to fast object, but
// we will not pretransition from double->fast, because
// it may hurt performance ("poisoning").
obj = newarraycase_length_smidouble("coates");
assertKind(elements_kind.fast, obj);
obj = newarraycase_length_smidouble(2.5);
// However, because of optimistic transitions, we will
// transition to the most general kind of elements kind found,
// therefore I can't count on this assert yet.
// assertKind(elements_kind.fast_double, obj);
function newarraycase_length_smiobj(value) {
var a = new Array(3);
a[0] = value;
return a;
}
// Case: new Array(<length>) as allocation site, smi->fast
obj = newarraycase_length_smiobj(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_length_smiobj("gloria");
assertKind(elements_kind.fast, obj);
obj = newarraycase_length_smiobj(2);
assertKind(elements_kind.fast, obj);
function newarraycase_list_smidouble(value) {
var a = new Array(1, 2, 3);
a[0] = value;
return a;
}
obj = newarraycase_list_smidouble(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_list_smidouble(1.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_list_smidouble(2);
assertKind(elements_kind.fast_double, obj);
function newarraycase_list_smiobj(value) {
var a = new Array(4, 5, 6);
a[0] = value;
return a;
}
obj = newarraycase_list_smiobj(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_list_smiobj("coates");
assertKind(elements_kind.fast, obj);
obj = newarraycase_list_smiobj(2);
assertKind(elements_kind.fast, obj);
function newarraycase_onearg(len, value) {
var a = new Array(len);
a[0] = value;
return a;
}
obj = newarraycase_onearg(5, 3.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_onearg(10, 5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_onearg(0, 5);
assertKind(elements_kind.fast_double, obj);
// Now pass a length that forces the dictionary path.
obj = newarraycase_onearg(100000, 5);
assertKind(elements_kind.dictionary, obj);
assertTrue(obj.length == 100000);
// Verify that cross context calls work
var realmA = Realm.current();
var realmB = Realm.create();
assertEquals(0, realmA);
assertEquals(1, realmB);
function instanceof_check(type) {
assertTrue(new type() instanceof type);
assertTrue(new type(5) instanceof type);
assertTrue(new type(1,2,3) instanceof type);
}
var realmBArray = Realm.eval(realmB, "Array");
instanceof_check(Array);
instanceof_check(realmBArray);
%OptimizeFunctionOnNextCall(instanceof_check);
instanceof_check(Array);
assertTrue(2 != %GetOptimizationStatus(instanceof_check));
instanceof_check(realmBArray);
assertTrue(1 != %GetOptimizationStatus(instanceof_check));
function newarraycase_smidouble(value) {
var a = new Array();
a[0] = value;
return a;
}
// Case: new Array() as allocation site, smi->double
obj = newarraycase_smidouble(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_smidouble(1.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_smidouble(2);
assertKind(elements_kind.fast_double, obj);
function newarraycase_smiobj(value) {
var a = new Array();
a[0] = value;
return a;
}
// Case: new Array() as allocation site, smi->fast
obj = newarraycase_smiobj(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_smiobj("gloria");
assertKind(elements_kind.fast, obj);
obj = newarraycase_smiobj(2);
assertKind(elements_kind.fast, obj);
function newarraycase_length_smidouble(value) {
var a = new Array(3);
a[0] = value;
return a;
}
// Case: new Array(length) as allocation site
obj = newarraycase_length_smidouble(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_length_smidouble(1.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_length_smidouble(2);
assertKind(elements_kind.fast_double, obj);
// Try to continue the transition to fast object, but
// we will not pretransition from double->fast, because
// it may hurt performance ("poisoning").
obj = newarraycase_length_smidouble("coates");
assertKind(elements_kind.fast, obj);
obj = newarraycase_length_smidouble(2.5);
// However, because of optimistic transitions, we will
// transition to the most general kind of elements kind found,
// therefore I can't count on this assert yet.
// assertKind(elements_kind.fast_double, obj);
function newarraycase_length_smiobj(value) {
var a = new Array(3);
a[0] = value;
return a;
}
// Case: new Array(<length>) as allocation site, smi->fast
obj = newarraycase_length_smiobj(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_length_smiobj("gloria");
assertKind(elements_kind.fast, obj);
obj = newarraycase_length_smiobj(2);
assertKind(elements_kind.fast, obj);
function newarraycase_list_smidouble(value) {
var a = new Array(1, 2, 3);
a[0] = value;
return a;
}
obj = newarraycase_list_smidouble(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_list_smidouble(1.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_list_smidouble(2);
assertKind(elements_kind.fast_double, obj);
function newarraycase_list_smiobj(value) {
var a = new Array(4, 5, 6);
a[0] = value;
return a;
}
obj = newarraycase_list_smiobj(1);
assertKind(elements_kind.fast_smi_only, obj);
obj = newarraycase_list_smiobj("coates");
assertKind(elements_kind.fast, obj);
obj = newarraycase_list_smiobj(2);
assertKind(elements_kind.fast, obj);
function newarraycase_onearg(len, value) {
var a = new Array(len);
a[0] = value;
return a;
}
obj = newarraycase_onearg(5, 3.5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_onearg(10, 5);
assertKind(elements_kind.fast_double, obj);
obj = newarraycase_onearg(0, 5);
assertKind(elements_kind.fast_double, obj);
// Now pass a length that forces the dictionary path.
obj = newarraycase_onearg(100000, 5);
assertKind(elements_kind.dictionary, obj);
assertTrue(obj.length == 100000);
// Verify that cross context calls work
var realmA = Realm.current();
var realmB = Realm.create();
assertEquals(0, realmA);
assertEquals(1, realmB);
function instanceof_check(type) {
assertTrue(new type() instanceof type);
assertTrue(new type(5) instanceof type);
assertTrue(new type(1,2,3) instanceof type);
}
var realmBArray = Realm.eval(realmB, "Array");
instanceof_check(Array);
instanceof_check(realmBArray);
%OptimizeFunctionOnNextCall(instanceof_check);
instanceof_check(Array);
assertTrue(2 != %GetOptimizationStatus(instanceof_check));
instanceof_check(realmBArray);
assertTrue(1 != %GetOptimizationStatus(instanceof_check));
}

View File

@@ -37,7 +37,6 @@
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
optimize_constructed_arrays = true;
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -45,12 +44,6 @@ if (support_smi_only_arrays) {
print("Tests do NOT include smi-only arrays.");
}
if (optimize_constructed_arrays) {
print("Tests include constructed array optimizations.");
} else {
print("Tests do NOT include constructed array optimizations.");
}
var elements_kind = {
fast_smi_only : 'fast smi only elements',
fast : 'fast elements',
@@ -87,7 +80,7 @@ function assertKind(expected, obj, name_opt) {
assertEquals(expected, getKind(obj), name_opt);
}
if (support_smi_only_arrays && optimize_constructed_arrays) {
if (support_smi_only_arrays) {
function bar0(t) {
return new t();
}

View File

@@ -37,7 +37,6 @@
// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
support_smi_only_arrays = true;
optimize_constructed_arrays = true;
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
@@ -45,12 +44,6 @@ if (support_smi_only_arrays) {
print("Tests do NOT include smi-only arrays.");
}
if (optimize_constructed_arrays) {
print("Tests include constructed array optimizations.");
} else {
print("Tests do NOT include constructed array optimizations.");
}
var elements_kind = {
fast_smi_only : 'fast smi only elements',
fast : 'fast elements',
@@ -87,7 +80,7 @@ function assertKind(expected, obj, name_opt) {
assertEquals(expected, getKind(obj), name_opt);
}
if (support_smi_only_arrays && optimize_constructed_arrays) {
if (support_smi_only_arrays) {
// Verify that basic elements kind feedback works for non-constructor
// array calls (as long as the call is made through an IC, and not