Support all fast elements kinds in the major array operations.

Currently missing support for unshift.

BUG=

Review URL: https://chromiumcodereview.appspot.com/11377132

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12969 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
verwaest@chromium.org 2012-11-15 12:19:14 +00:00
parent 50e975574b
commit a08194c83a
24 changed files with 1043 additions and 331 deletions

View File

@ -7517,7 +7517,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(r0, r3, r1,
__ StoreNumberToDoubleElements(r0, r3,
// Overwrites all regs after this.
r5, r6, r7, r9, r2,
&slow_elements);

View File

@ -1379,7 +1379,6 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,

View File

@ -1955,13 +1955,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail) {
Label* fail,
int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@ -1988,8 +1988,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
str(mantissa_reg, FieldMemOperand(
scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@ -2010,7 +2012,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
add(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element

View File

@ -831,14 +831,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail);
Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are

View File

@ -1623,7 +1623,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements;
Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = r6;
Register end_elements = r5;
@ -1634,10 +1634,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
r0,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
&check_double,
DONT_DO_SMI_CHECK);
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
@ -1652,7 +1651,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
@ -1672,6 +1670,40 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
__ bind(&check_double);
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
r0,
Heap::kFixedDoubleArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(r0, r4);
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
r4, r0, elements, r3, r5, r2, r9,
&call_builtin, argc * kDoubleSize);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Check for a smi.
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@ -1683,6 +1715,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
__ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r7, ip);
__ b(eq, &call_builtin);
// edx: receiver
// r3: map
Label try_holey_map;
@ -4698,7 +4735,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,

View File

@ -325,6 +325,18 @@ BUILTIN(ArrayCodeGeneric) {
}
// Copies |len| double elements from |src| starting at |src_index| into
// |dst| starting at |dst_index|. Uses memmove, so the source and
// destination ranges may overlap (in this patch it is used to shift
// elements within the same FixedDoubleArray).
static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
int src_index,
int len) {
// Nothing to do for an empty range; also avoids touching data_start()
// on arrays where the range would be degenerate.
if (len == 0) return;
memmove(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kDoubleSize);
}
static void MoveElements(Heap* heap,
AssertNoAllocation* no_gc,
FixedArray* dst,
@ -351,24 +363,39 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
}
static FixedArray* LeftTrimFixedArray(Heap* heap,
FixedArray* elms,
int to_trim) {
// Marks the elements [from, to) of |dst| as holes. Double-array
// counterpart of the FixedArray FillWithHoles above; FixedDoubleArray
// encodes holes via set_the_hole (the hole NaN bit pattern), so no
// Heap/the_hole_value reference is needed here.
static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
for (int i = from; i < to; i++) {
dst->set_the_hole(i);
}
}
static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
FixedArrayBase* elms,
int to_trim) {
Map* map = elms->map();
int entry_size;
if (elms->IsFixedArray()) {
entry_size = kPointerSize;
} else {
entry_size = kDoubleSize;
}
ASSERT(elms->map() != HEAP->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
ASSERT(!HEAP->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
if (to_trim > FixedArrayBase::kHeaderSize / entry_size &&
elms->IsFixedArray() &&
!heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
@ -382,14 +409,15 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
int new_start_index = to_trim * (entry_size / kPointerSize);
former_start[new_start_index] = map;
former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * kPointerSize;
int size_delta = to_trim * entry_size;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
@ -397,8 +425,8 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
return FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + to_trim * entry_size));
}
@ -427,19 +455,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
array->TransitionElementsKind(FAST_ELEMENTS);
if (maybe_transition->IsFailure()) return maybe_transition;
return elms;
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
maybe_writable_result->IsFailure()) {
!maybe_writable_result->To(&elms)) {
return maybe_writable_result;
}
} else if (map == heap->fixed_double_array_map()) {
if (args == NULL) return elms;
} else {
return NULL;
}
@ -449,13 +472,28 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
args_length - first_added_arg,
DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
ElementsKind origin_kind = array->map()->elements_kind();
ASSERT(!IsFastObjectElementsKind(origin_kind));
ElementsKind target_kind = origin_kind;
int arg_count = args->length() - first_added_arg;
Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
for (int i = 0; i < arg_count; i++) {
Object* arg = arguments[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
target_kind = FAST_DOUBLE_ELEMENTS;
} else {
target_kind = FAST_ELEMENTS;
break;
}
}
}
if (target_kind != origin_kind) {
MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
if (maybe_failure->IsFailure()) return maybe_failure;
return array->elements();
}
return elms;
}
@ -499,75 +537,131 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
if (FLAG_harmony_observation && array->map()->is_observed()) {
if (FLAG_harmony_observation &&
JSObject::cast(receiver)->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
if (to_add == 0) {
return Smi::FromInt(len);
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
JSArray* array = JSArray::cast(receiver);
ElementsKind kind = array->GetElementsKind();
int new_length = len + to_add;
if (IsFastSmiOrObjectElementsKind(kind)) {
FixedArray* elms = FixedArray::cast(elms_obj);
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
{ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
if (to_add == 0) {
return Smi::FromInt(len);
}
FixedArray* new_elms = FixedArray::cast(obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
int new_length = len + to_add;
elms = new_elms;
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
FixedArray* new_elms;
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(array, 0, new_elms, kind, 0, len, elms_obj);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
}
// Add the provided values.
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
}
if (elms != array->elements()) {
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
} else {
int len = Smi::cast(array->length())->value();
int elms_len = elms_obj->length();
int to_add = args.length() - 1;
if (to_add == 0) {
return Smi::FromInt(len);
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
int new_length = len + to_add;
FixedDoubleArray* new_elms;
if (new_length > elms_len) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
MaybeObject* maybe_obj =
heap->AllocateUninitializedFixedDoubleArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(array, 0, new_elms, kind, 0, len, elms_obj);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
FillWithHoles(new_elms, len + to_add, new_elms->length());
} else {
// to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
// empty_fixed_array.
new_elms = FixedDoubleArray::cast(elms_obj);
}
// Add the provided values.
AssertNoAllocation no_gc;
int index;
for (index = 0; index < to_add; index++) {
Object* arg = args[index + 1];
new_elms->set(index + len, arg->Number());
}
if (new_elms != array->elements()) {
array->set_elements(new_elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
}
// Add the provided values.
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
}
if (elms != array->elements()) {
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
}
BUILTIN(ArrayPop) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArray* elms = FixedArray::cast(elms_obj);
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms->To(&elms_obj)) return maybe_elms;
JSArray* array = JSArray::cast(receiver);
if (FLAG_harmony_observation && array->map()->is_observed()) {
@ -577,18 +671,15 @@ BUILTIN(ArrayPop) {
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
// Get top element
Object* top = elms->get(len - 1);
// Set the length.
array->set_length(Smi::FromInt(len - 1));
if (!top->IsTheHole()) {
// Delete the top element.
elms->set_the_hole(len - 1);
return top;
}
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = len - 1;
Object* result;
MaybeObject* maybe_result = accessor->Get(array, array, new_length);
if (!maybe_result->To(&result)) return maybe_result;
MaybeObject* maybe_failure =
accessor->SetLength(array, Smi::FromInt(new_length));
if (maybe_failure->IsFailure()) return maybe_failure;
if (!result->IsTheHole()) return result;
return array->GetPrototype()->GetElement(len - 1);
}
@ -596,19 +687,17 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayShift", args);
@ -618,18 +707,25 @@ BUILTIN(ArrayShift) {
if (len == 0) return heap->undefined_value();
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
first = heap->undefined_value();
}
ElementsAccessor* accessor = array->GetElementsAccessor();
Object* first;
MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
if (!maybe_first->To(&first)) return maybe_first;
if (!heap->lo_space()->Contains(elms)) {
array->set_elements(LeftTrimFixedArray(heap, elms, 1));
if (!heap->lo_space()->Contains(elms_obj)) {
array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
} else {
// Shift the elements.
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
if (elms_obj->IsFixedArray()) {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
MoveDoubleElements(elms, 0, elms, 1, len - 1);
elms->set_the_hole(len - 1);
}
}
// Set the length.
@ -642,19 +738,21 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
@ -675,13 +773,17 @@ BUILTIN(ArrayUnshift) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
{ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
FixedArray* new_elms;
MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_elms->To(&new_elms)) return maybe_elms;
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(array, 0, new_elms, kind, to_add, len, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@ -706,16 +808,20 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArray* elms;
FixedArrayBase* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastSmiOrObjectElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (array->HasFastElements()) {
elms = array->elements();
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(array->elements());
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
@ -724,15 +830,19 @@ BUILTIN(ArraySlice) {
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastSmiOrObjectElements();
receiver->IsJSObject() &&
JSObject::cast(receiver)->map() == arguments_map;
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
Object* len_obj = JSObject::cast(receiver)
->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
JSObject* object = JSObject::cast(receiver);
if (object->HasFastElements()) {
elms = object->elements();
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -740,12 +850,27 @@ BUILTIN(ArraySlice) {
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
JSObject* object = JSObject::cast(receiver);
ElementsKind kind = object->GetElementsKind();
if (IsHoleyElementsKind(kind)) {
bool packed = true;
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = 0; i < len; i++) {
if (elms->get(i) == heap->the_hole_value()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
if (!accessor->HasElement(object, object, i, elms)) {
packed = false;
break;
}
}
if (packed) {
kind = GetPackedElementsKind(kind);
} else if (!receiver->IsJSArray()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@ -758,6 +883,12 @@ BUILTIN(ArraySlice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
double start = HeapNumber::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -765,6 +896,12 @@ BUILTIN(ArraySlice) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
} else if (arg2->IsHeapNumber()) {
double end = HeapNumber::cast(arg2)->value();
if (end < kMinInt || end > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
relative_end = static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -779,21 +916,21 @@ BUILTIN(ArraySlice) {
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
// Calculate the length of result array.
int result_len = Max(final - k, 0);
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
result_len);
JSArray* result_array;
MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
result_len,
result_len);
if (!maybe_array->To(&result_array)) return maybe_array;
CopyObjectToObjectElements(elms, elements_kind, k,
FixedArray::cast(result_array->elements()),
elements_kind, 0, result_len);
ElementsAccessor* accessor = object->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(object, k, result_array->elements(),
kind, 0, result_len, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
return result_array;
}
@ -802,19 +939,18 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
if (maybe_elms == NULL) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
if (!maybe_elms->To(&elms_obj)) return maybe_elms;
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
@ -829,6 +965,12 @@ BUILTIN(ArraySplice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
double start = HeapNumber::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@ -858,51 +1000,83 @@ BUILTIN(ArraySplice) {
actual_delete_count = Min(Max(value, 0), len - actual_start);
}
ElementsKind elements_kind = array->GetElementsKind();
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count;
// For double mode we do not support changing the length.
if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
if (new_length == 0) {
MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
elms_obj, elements_kind, actual_delete_count);
if (maybe_array->IsFailure()) return maybe_array;
array->set_elements(heap->empty_fixed_array());
array->set_length(Smi::FromInt(0));
return maybe_array;
}
JSArray* result_array = NULL;
ElementsKind elements_kind =
JSObject::cast(receiver)->GetElementsKind();
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
actual_delete_count,
actual_delete_count);
if (!maybe_array->To(&result_array)) return maybe_array;
{
// Fill newly created array.
CopyObjectToObjectElements(elms, elements_kind, actual_start,
FixedArray::cast(result_array->elements()),
elements_kind, 0, actual_delete_count);
if (actual_delete_count > 0) {
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(array, actual_start, result_array->elements(),
elements_kind, 0, actual_delete_count, elms_obj);
// Cannot fail since the origin and target array are of the same elements
// kind.
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count;
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms) &&
const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
{
if (elms_obj->IsFixedDoubleArray()) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
MoveDoubleElements(elms, delta, elms, 0, actual_start);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
}
elms = LeftTrimFixedArray(heap, elms, delta);
elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
elms_changed = true;
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(heap, elms, new_length, len);
if (elms_obj->IsFixedDoubleArray()) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
MoveDoubleElements(elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(elms, new_length, len);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(heap, elms, new_length, len);
}
}
} else if (item_count > actual_delete_count) {
FixedArray* elms = FixedArray::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@ -911,28 +1085,27 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj;
{ MaybeObject* maybe_obj =
heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
FixedArray* new_elms;
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
{
// Copy the part before actual_start as is.
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0,
new_elms, kind, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(elms, kind,
actual_start + actual_delete_count,
new_elms, kind,
actual_start + item_count, to_copy);
}
// Copy the part before actual_start as is.
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
array, 0, new_elms, kind, 0, actual_start, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
const int to_copy = len - actual_delete_count - actual_start;
maybe_failure = accessor->CopyElements(
array, actual_start + actual_delete_count, new_elms, kind,
actual_start + item_count, to_copy, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
elms_obj = new_elms;
elms_changed = true;
} else {
AssertNoAllocation no_gc;
@ -943,16 +1116,28 @@ BUILTIN(ArraySplice) {
}
}
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
if (IsFastDoubleElementsKind(elements_kind)) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
for (int k = actual_start; k < actual_start + item_count; k++) {
Object* arg = args[3 + k - actual_start];
if (arg->IsSmi()) {
elms->set(k, Smi::cast(arg)->value());
} else {
elms->set(k, HeapNumber::cast(arg)->value());
}
}
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
}
}
if (elms_changed) {
array->set_elements(elms);
array->set_elements(elms_obj);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
@ -977,11 +1162,10 @@ BUILTIN(ArrayConcat) {
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
!JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
!JSArray::cast(arg)->HasFastElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
@ -991,27 +1175,24 @@ BUILTIN(ArrayConcat) {
result_len += len;
ASSERT(result_len >= 0);
if (result_len > FixedArray::kMaxLength) {
if (result_len > FixedDoubleArray::kMaxLength) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
if (!JSArray::cast(arg)->HasFastSmiElements()) {
if (IsFastSmiElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
elements_kind = FAST_ELEMENTS;
}
ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
ElementsKind packed_kind = GetPackedElementsKind(arg_kind);
if (IsMoreGeneralElementsKindTransition(
GetPackedElementsKind(elements_kind), packed_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(arg_kind);
} else {
elements_kind = arg_kind;
}
}
if (JSArray::cast(arg)->HasFastHoleyElements()) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
}
// Allocate result.
JSArray* result_array;
// Allocate result.
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
@ -1019,19 +1200,19 @@ BUILTIN(ArrayConcat) {
if (!maybe_array->To(&result_array)) return maybe_array;
if (result_len == 0) return result_array;
// Copy data.
int start_pos = 0;
FixedArray* result_elms(FixedArray::cast(result_array->elements()));
int j = 0;
FixedArrayBase* storage = result_array->elements();
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
ElementsAccessor* accessor = array->GetElementsAccessor();
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(elms, elements_kind, 0,
result_elms, elements_kind,
start_pos, len);
start_pos += len;
MaybeObject* maybe_failure =
accessor->CopyElements(array, 0, storage, elements_kind, j, len);
if (maybe_failure->IsFailure()) return maybe_failure;
j += len;
}
ASSERT(start_pos == result_len);
ASSERT(j == result_len);
return result_array;
}

View File

@ -146,13 +146,13 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
void CopyObjectToObjectElements(FixedArray* from,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
static void CopyObjectToObjectElements(FixedArray* from,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {

View File

@ -197,16 +197,6 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
void CopyObjectToObjectElements(FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
int copy_size);
} } // namespace v8::internal
#endif // V8_ELEMENTS_H_

View File

@ -968,6 +968,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
elements->length(),
pretenure),
JSArray);
}

View File

@ -4182,7 +4182,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@ -4247,7 +4247,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
FixedArrayBase* elms;
MaybeObject* maybe_elms = NULL;
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
if (IsFastDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
@ -4274,13 +4274,14 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
int length,
PretenureFlag pretenure) {
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
array->set_elements(elements);
array->set_length(Smi::FromInt(elements->length()));
array->set_length(Smi::FromInt(length));
array->ValidateElements();
return array;
}

View File

@ -584,6 +584,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
ElementsKind elements_kind,
int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.

View File

@ -507,7 +507,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register scratch1,
XMMRegister scratch2,
Label* fail,
bool specialize_for_processor) {
bool specialize_for_processor,
int elements_offset) {
Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@ -529,12 +530,14 @@ void MacroAssembler::StoreNumberToDoubleElements(
CpuFeatures::Scope use_sse2(SSE2);
movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
movdbl(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
fstp_d(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset));
}
jmp(&done);
@ -561,13 +564,15 @@ void MacroAssembler::StoreNumberToDoubleElements(
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatures::Scope fscope(SSE2);
cvtsi2sd(scratch2, scratch1);
movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
movdbl(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
push(scratch1);
fild_s(Operand(esp, 0));
pop(scratch1);
fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
fstp_d(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset));
}
bind(&done);
}

View File

@ -388,7 +388,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
XMMRegister scratch2,
Label* fail,
bool specialize_for_processor);
bool specialize_for_processor,
int offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with

View File

@ -1523,7 +1523,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
Label attempt_to_grow_elements, with_write_barrier;
Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
@ -1531,7 +1531,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
__ j(not_equal, &call_builtin);
__ j(not_equal, &check_double);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@ -1562,17 +1562,49 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ ret((argc + 1) * kPointerSize);
__ bind(&check_double);
// Check that the elements are in double mode.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_double_array_map()));
__ j(not_equal, &call_builtin);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(eax, Immediate(Smi::FromInt(argc)));
// Get the elements' length into ecx.
__ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(eax, ecx);
__ j(greater, &call_builtin);
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ StoreNumberToDoubleElements(
ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(factory()->heap_number_map()));
__ j(equal, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map

View File

@ -1943,6 +1943,11 @@ void FixedArray::set_null_unchecked(Heap* heap, int index) {
}
double* FixedDoubleArray::data_start() {
return &READ_DOUBLE_FIELD(this, kHeaderSize);
}
Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
@ -4788,6 +4793,11 @@ bool JSObject::HasFastHoleyElements() {
}
bool JSObject::HasFastElements() {
return IsFastElementsKind(GetElementsKind());
}
bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}

View File

@ -1574,6 +1574,8 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_ELEMENTS or
// FAST_SMI_ONLY_ELEMENTS.
inline bool HasFastSmiOrObjectElements();
// Returns true if an object has any of the fast elements kinds.
inline bool HasFastElements();
// Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
// ElementsKind.
inline bool HasFastDoubleElements();
@ -2384,12 +2386,12 @@ class FixedArray: public FixedArrayBase {
inline void set_unchecked(Heap* heap, int index, Object* value,
WriteBarrierMode mode);
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
inline Object** GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@ -2491,6 +2493,9 @@ class FixedDoubleArray: public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
// Gives access to raw memory which stores the array's data.
inline double* data_start();
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }

View File

@ -3715,7 +3715,6 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
ElementsKind elements_kind = FAST_SMI_ELEMENTS;
bool has_only_undefined_values = true;
bool has_hole_values = false;
// Fill in the literals.
@ -3747,7 +3746,6 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
// the tagged value, no matter what the ElementsKind is in case we
// ultimately end up in FAST_ELEMENTS.
has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
if (elements_kind == FAST_SMI_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
@ -3786,13 +3784,6 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
}
// Very small array literals that don't have a concrete hint about their type
// from a constant value should default to the slow case to avoid lots of
// elements transitions on really small objects.
if (has_only_undefined_values && values->length() <= 2) {
elements_kind = FAST_ELEMENTS;
}
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&

View File

@ -4102,8 +4102,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
if (IsFastElementsKind(elements_kind) &&
!IsFastObjectElementsKind(elements_kind)) {
if (IsFastDoubleElementsKind(elements_kind)) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
if (IsFastHoleyElementsKind(elements_kind)) {
@ -4116,6 +4115,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
} else {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
!IsFastElementsKind(elements_kind));
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
@ -9303,7 +9305,7 @@ class ArrayConcatVisitor {
clear_storage();
set_storage(*result);
}
}
}
void increase_index_offset(uint32_t delta) {
if (JSObject::kMaxElementCount - index_offset_ < delta) {
@ -9394,10 +9396,22 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
case FAST_HOLEY_DOUBLE_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
int fast_length = static_cast<int>(length);
if (array->elements()->IsFixedArray()) {
ASSERT(FixedArray::cast(array->elements())->length() == 0);
break;
}
Handle<FixedDoubleArray> elements(
FixedDoubleArray::cast(array->elements()));
for (int i = 0; i < fast_length; i++) {
if (!elements->is_the_hole(i)) element_count++;
}
break;
}
case DICTIONARY_ELEMENTS: {
Handle<SeededNumberDictionary> dictionary(
SeededNumberDictionary::cast(array->elements()));
@ -9640,8 +9654,27 @@ static bool IterateElements(Isolate* isolate,
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// TODO(1810): Decide if it's worthwhile to implement this.
UNREACHABLE();
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
Handle<FixedDoubleArray> elements(
FixedDoubleArray::cast(receiver->elements()));
int fast_length = static_cast<int>(length);
ASSERT(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
HandleScope loop_scope(isolate);
if (!elements->is_the_hole(j)) {
double double_value = elements->get_scalar(j);
Handle<Object> element_value =
isolate->factory()->NewNumber(double_value);
visitor->visit(j, element_value);
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value = Object::GetElement(receiver, j);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
}
break;
}
case DICTIONARY_ELEMENTS: {
@ -9744,48 +9777,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
ElementsKind kind = FAST_SMI_ELEMENTS;
uint32_t estimate_result_length = 0;
uint32_t estimate_nof_elements = 0;
{
for (int i = 0; i < argument_count; i++) {
HandleScope loop_scope;
Handle<Object> obj(elements->get(i));
uint32_t length_estimate;
uint32_t element_estimate;
if (obj->IsJSArray()) {
Handle<JSArray> array(Handle<JSArray>::cast(obj));
// TODO(1810): Find out if it's worthwhile to properly support
// arbitrary ElementsKinds. For now, pessimistically transition to
// FAST_*_ELEMENTS.
if (array->HasFastDoubleElements()) {
ElementsKind to_kind = FAST_ELEMENTS;
if (array->HasFastHoleyElements()) {
to_kind = FAST_HOLEY_ELEMENTS;
}
array = Handle<JSArray>::cast(
JSObject::TransitionElementsKind(array, to_kind));
for (int i = 0; i < argument_count; i++) {
HandleScope loop_scope;
Handle<Object> obj(elements->get(i));
uint32_t length_estimate;
uint32_t element_estimate;
if (obj->IsJSArray()) {
Handle<JSArray> array(Handle<JSArray>::cast(obj));
length_estimate = static_cast<uint32_t>(array->length()->Number());
if (length_estimate != 0) {
ElementsKind array_kind =
GetPackedElementsKind(array->map()->elements_kind());
if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
kind = array_kind;
}
length_estimate =
static_cast<uint32_t>(array->length()->Number());
element_estimate =
EstimateElementCount(array);
} else {
length_estimate = 1;
element_estimate = 1;
}
// Avoid overflows by capping at kMaxElementCount.
if (JSObject::kMaxElementCount - estimate_result_length <
length_estimate) {
estimate_result_length = JSObject::kMaxElementCount;
} else {
estimate_result_length += length_estimate;
}
if (JSObject::kMaxElementCount - estimate_nof_elements <
element_estimate) {
estimate_nof_elements = JSObject::kMaxElementCount;
} else {
estimate_nof_elements += element_estimate;
element_estimate = EstimateElementCount(array);
} else {
if (obj->IsHeapObject()) {
if (obj->IsNumber()) {
if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
kind = FAST_DOUBLE_ELEMENTS;
}
} else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
kind = FAST_ELEMENTS;
}
}
length_estimate = 1;
element_estimate = 1;
}
// Avoid overflows by capping at kMaxElementCount.
if (JSObject::kMaxElementCount - estimate_result_length <
length_estimate) {
estimate_result_length = JSObject::kMaxElementCount;
} else {
estimate_result_length += length_estimate;
}
if (JSObject::kMaxElementCount - estimate_nof_elements <
element_estimate) {
estimate_nof_elements = JSObject::kMaxElementCount;
} else {
estimate_nof_elements += element_estimate;
}
}
@ -9796,8 +9832,76 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<FixedArray> storage;
if (fast_case) {
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
if (kind == FAST_DOUBLE_ELEMENTS) {
Handle<FixedDoubleArray> double_storage =
isolate->factory()->NewFixedDoubleArray(estimate_result_length);
int j = 0;
bool failure = false;
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj(elements->get(i));
if (obj->IsSmi()) {
double_storage->set(j, Smi::cast(*obj)->value());
j++;
} else if (obj->IsNumber()) {
double_storage->set(j, obj->Number());
j++;
} else {
JSArray* array = JSArray::cast(*obj);
uint32_t length = static_cast<uint32_t>(array->length()->Number());
switch (array->map()->elements_kind()) {
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// Empty fixed array indicates that there are no elements.
if (array->elements()->IsFixedArray()) break;
FixedDoubleArray* elements =
FixedDoubleArray::cast(array->elements());
for (uint32_t i = 0; i < length; i++) {
if (elements->is_the_hole(i)) {
failure = true;
break;
}
double double_value = elements->get_scalar(i);
double_storage->set(j, double_value);
j++;
}
break;
}
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
FixedArray* elements(
FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
Object* element = elements->get(i);
if (element->IsTheHole()) {
failure = true;
break;
}
int32_t int_value = Smi::cast(element)->value();
double_storage->set(j, int_value);
j++;
}
break;
}
case FAST_HOLEY_ELEMENTS:
ASSERT_EQ(0, length);
break;
default:
UNREACHABLE();
}
}
if (failure) break;
}
Handle<JSArray> array = isolate->factory()->NewJSArray(0);
Smi* length = Smi::FromInt(j);
Handle<Map> map;
map = isolate->factory()->GetElementsTransitionMap(array, kind);
array->set_map(*map);
array->set_length(length);
array->set_elements(*double_storage);
return *array;
}
// The backing storage array must have non-existing elements to preserve
// holes across concat operations.
storage = isolate->factory()->NewFixedArrayWithHoles(
estimate_result_length);
} else {

View File

@ -2774,7 +2774,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register elements,
Register index,
XMMRegister xmm_scratch,
Label* fail) {
Label* fail,
int elements_offset) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@ -2793,7 +2794,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
jmp(&done);
@ -2816,7 +2818,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
cvtlsi2sd(xmm_scratch, kScratchRegister);
movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
bind(&done);
}

View File

@ -895,7 +895,8 @@ class MacroAssembler: public Assembler {
Register elements,
Register index,
XMMRegister xmm_scratch,
Label* fail);
Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with

View File

@ -1487,7 +1487,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
Label attempt_to_grow_elements, with_write_barrier;
Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
@ -1495,7 +1495,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
__ j(not_equal, &check_double);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@ -1526,6 +1526,33 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&check_double);
// Check that the elements are in double mode.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_double_array_map());
__ j(not_equal, &call_builtin);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ addl(rax, Immediate(argc));
// Get the elements' length into rcx.
__ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmpl(rax, rcx);
__ j(greater, &call_builtin);
__ movq(rcx, Operand(rsp, argc * kPointerSize));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
@ -1537,6 +1564,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(equal, &call_builtin);
// rdx: receiver
// rbx: map

View File

@ -0,0 +1,307 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays
// Flags: --noparallel-recompilation
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile time
// sticks if built with snapshot. If --smi-only-arrays is deactivated by
// default, only a no-snapshot build actually has smi-only arrays enabled in
// this test case. Depending on whether smi-only arrays are actually enabled,
// this test takes the appropriate code path to check smi-only arrays.
support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]);
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
} else {
print("Tests do NOT include smi-only arrays.");
}
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
function array_natives_test() {
// Ensure small array literals start in specific element kind mode.
assertTrue(%HasFastSmiElements([]));
assertTrue(%HasFastSmiElements([1]));
assertTrue(%HasFastSmiElements([1,2]));
assertTrue(%HasFastDoubleElements([1.1]));
assertTrue(%HasFastDoubleElements([1.1,2]));
// Push
var a0 = [1, 2, 3];
assertTrue(%HasFastSmiElements(a0));
a0.push(4);
assertTrue(%HasFastSmiElements(a0));
a0.push(1.3);
assertTrue(%HasFastDoubleElements(a0));
a0.push(1.5);
assertTrue(%HasFastDoubleElements(a0));
a0.push({});
assertTrue(%HasFastObjectElements(a0));
a0.push({});
assertTrue(%HasFastObjectElements(a0));
assertEquals([1,2,3,4,1.3,1.5,{},{}], a0);
// Concat
var a1;
a1 = [1,2,3].concat([]);
assertTrue(%HasFastSmiElements(a1));
assertEquals([1,2,3], a1);
a1 = [1,2,3].concat([4,5,6]);
assertTrue(%HasFastSmiElements(a1));
assertEquals([1,2,3,4,5,6], a1);
a1 = [1,2,3].concat([4,5,6], [7,8,9]);
assertTrue(%HasFastSmiElements(a1));
assertEquals([1,2,3,4,5,6,7,8,9], a1);
a1 = [1.1,2,3].concat([]);
assertTrue(%HasFastDoubleElements(a1));
assertEquals([1.1,2,3], a1);
a1 = [1,2,3].concat([1.1, 2]);
assertTrue(%HasFastDoubleElements(a1));
assertEquals([1,2,3,1.1,2], a1);
a1 = [1.1,2,3].concat([1, 2]);
assertTrue(%HasFastDoubleElements(a1));
assertEquals([1.1,2,3,1,2], a1);
a1 = [1.1,2,3].concat([1.2, 2]);
assertTrue(%HasFastDoubleElements(a1));
assertEquals([1.1,2,3,1.2,2], a1);
a1 = [1,2,3].concat([{}]);
assertTrue(%HasFastObjectElements(a1));
assertEquals([1,2,3,{}], a1);
a1 = [1.1,2,3].concat([{}]);
assertTrue(%HasFastObjectElements(a1));
assertEquals([1.1,2,3,{}], a1);
a1 = [{}].concat([1,2,3]);
assertTrue(%HasFastObjectElements(a1));
assertEquals([{},1,2,3], a1);
a1 = [{}].concat([1.1,2,3]);
assertTrue(%HasFastObjectElements(a1));
assertEquals([{},1.1,2,3], a1);
// Slice
var a2 = [1,2,3];
assertTrue(%HasFastSmiElements(a2.slice()));
assertTrue(%HasFastSmiElements(a2.slice(1)));
assertTrue(%HasFastSmiElements(a2.slice(1, 2)));
assertEquals([1,2,3], a2.slice());
assertEquals([2,3], a2.slice(1));
assertEquals([2], a2.slice(1,2));
a2 = [1.1,2,3];
assertTrue(%HasFastDoubleElements(a2.slice()));
assertTrue(%HasFastDoubleElements(a2.slice(1)));
assertTrue(%HasFastDoubleElements(a2.slice(1, 2)));
assertEquals([1.1,2,3], a2.slice());
assertEquals([2,3], a2.slice(1));
assertEquals([2], a2.slice(1,2));
a2 = [{},2,3];
assertTrue(%HasFastObjectElements(a2.slice()));
assertTrue(%HasFastObjectElements(a2.slice(1)));
assertTrue(%HasFastObjectElements(a2.slice(1, 2)));
assertEquals([{},2,3], a2.slice());
assertEquals([2,3], a2.slice(1));
assertEquals([2], a2.slice(1,2));
// Splice
var a3 = [1,2,3];
var a3r;
a3r = a3.splice(0, 0);
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastSmiElements(a3));
assertEquals([], a3r);
assertEquals([1, 2, 3], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 1);
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastSmiElements(a3));
assertEquals([1], a3r);
assertEquals([2, 3], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 0, 2);
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastSmiElements(a3));
assertEquals([], a3r);
assertEquals([2, 1, 2, 3], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 1, 2);
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastSmiElements(a3));
assertEquals([1], a3r);
assertEquals([2, 2, 3], a3);
// Splicing a FAST_DOUBLE array keeps FAST_DOUBLE in the source; the
// removed-elements result may be narrowed to FAST_SMI when the removed
// values happen to be SMIs (the JS fallback "takes the best fit").
a3 = [1.1,2,3];
a3r = a3.splice(0, 0);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([1.1, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 1);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([1.1], a3r);
assertEquals([2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 0, 2);
// Commented out since handled in js, which takes the best fit.
// assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([2, 1.1, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 1, 2);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([1.1], a3r);
assertEquals([2, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 0, 2.1);
// Commented out since handled in js, which takes the best fit.
// assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([2.1, 1.1, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 1, 2.2);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([1.1], a3r);
assertEquals([2.2, 2, 3], a3);
// Inserting a double into a SMI array via splice transitions the
// source array to FAST_DOUBLE.
a3 = [1,2,3];
a3r = a3.splice(0, 0, 2.1);
// Commented out since handled in js, which takes the best fit.
// assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastSmiElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([2.1, 1, 2, 3], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 1, 2.2);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([1], a3r);
assertEquals([2.2, 2, 3], a3);
// Splicing arrays that hold (or receive) an object must yield FAST
// (object) elements in both the result and the source, including the
// transitions SMI->object and DOUBLE->object when {} is inserted.
a3 = [{},2,3];
a3r = a3.splice(0, 0);
assertTrue(%HasFastObjectElements(a3r));
assertTrue(%HasFastObjectElements(a3));
assertEquals([], a3r);
assertEquals([{}, 2, 3], a3);
a3 = [1,2,{}];
a3r = a3.splice(0, 1);
assertTrue(%HasFastObjectElements(a3r));
assertTrue(%HasFastObjectElements(a3));
assertEquals([1], a3r);
assertEquals([2, {}], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 0, {});
assertTrue(%HasFastObjectElements(a3r));
assertTrue(%HasFastObjectElements(a3));
assertEquals([], a3r);
assertEquals([{}, 1, 2, 3], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 1, {});
assertTrue(%HasFastObjectElements(a3r));
assertTrue(%HasFastObjectElements(a3));
assertEquals([1], a3r);
assertEquals([{}, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 0, {});
assertTrue(%HasFastObjectElements(a3r));
assertTrue(%HasFastObjectElements(a3));
assertEquals([], a3r);
assertEquals([{}, 1.1, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 1, {});
assertTrue(%HasFastObjectElements(a3r));
assertTrue(%HasFastObjectElements(a3));
assertEquals([1.1], a3r);
assertEquals([{}, 2, 3], a3);
// Pop
// pop() must return the last element and must not change the array's
// elements kind (SMI, DOUBLE, and object arrays each keep their kind).
var a4 = [1,2,3];
assertEquals(3, a4.pop());
assertTrue(%HasFastSmiElements(a4));
a4 = [1.1,2,3];
assertEquals(3, a4.pop());
assertTrue(%HasFastDoubleElements(a4));
a4 = [{},2,3];
assertEquals(3, a4.pop());
assertTrue(%HasFastObjectElements(a4));
// Shift
// shift() must return the first element and preserve the elements kind
// for SMI, DOUBLE, and object arrays alike.
var a4 = [1,2,3];
assertEquals(1, a4.shift());
assertTrue(%HasFastSmiElements(a4));
a4 = [1.1,2,3];
assertEquals(1.1, a4.shift());
assertTrue(%HasFastDoubleElements(a4));
a4 = [{},2,3];
assertEquals({}, a4.shift());
assertTrue(%HasFastObjectElements(a4));
// Unshift
// unshift() keeps FAST_SMI when an SMI is prepended to an SMI array.
// Any case involving doubles currently falls back to FAST object
// elements (double unshift is not yet supported — see TODO below).
var a4 = [1,2,3];
a4.unshift(1);
assertTrue(%HasFastSmiElements(a4));
assertEquals([1,1,2,3], a4);
a4 = [1,2,3];
a4.unshift(1.1);
// TODO(verwaest): We'll want to support double unshifting as well.
// assertTrue(%HasFastDoubleElements(a4));
assertTrue(%HasFastObjectElements(a4));
assertEquals([1.1,1,2,3], a4);
a4 = [1.1,2,3];
a4.unshift(1);
// assertTrue(%HasFastDoubleElements(a4));
assertTrue(%HasFastObjectElements(a4));
assertEquals([1,1.1,2,3], a4);
// Object arrays stay FAST object elements under unshift.
a4 = [{},2,3];
a4.unshift(1);
assertTrue(%HasFastObjectElements(a4));
assertEquals([1,{},2,3], a4);
a4 = [{},2,3];
a4.unshift(1.1);
assertTrue(%HasFastObjectElements(a4));
assertEquals([1.1,{},2,3], a4);
}
if (support_smi_only_arrays) {
for (var i = 0; i < 3; i++) {
array_natives_test();
}
%OptimizeFunctionOnNextCall(array_natives_test);
array_natives_test();
}

View File

@ -290,3 +290,15 @@
func('a', 'b', 'c');
})();
// Check slicing of holey objects with elements in the prototype
(function() {
  function sliceWithPrototypeHole() {
    // Punch a hole into the arguments object, then make the hole's
    // index readable through the prototype chain.
    delete arguments[1];
    arguments.__proto__[1] = 5;
    var sliced = Array.prototype.slice.call(arguments);
    // Clean up the prototype before asserting so other tests are not
    // affected by the leftover element.
    delete arguments.__proto__[1];
    assertEquals([1,5,3], sliced);
  }
  sliceWithPrototypeHole(1,2,3);
})();

View File

@ -321,8 +321,7 @@ if (support_smi_only_arrays) {
assertKind(elements_kind.fast_double, b);
var c = a.concat(b);
assertEquals([1, 2, 4.5, 5.5], c);
// TODO(1810): Change implementation so that we get DOUBLE elements here?
assertKind(elements_kind.fast, c);
assertKind(elements_kind.fast_double, c);
}
// Test that Array.push() correctly handles SMI elements.