Remove hand-written assembly ArrayPush stubs

R=mstarzinger@chromium.org, verwaest@chromium.org

Review URL: https://codereview.chromium.org/233293005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20839 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
danno@chromium.org 2014-04-17 11:37:59 +00:00
parent 85cf6ce9fa
commit 59b3dc5812
12 changed files with 150 additions and 1047 deletions


@@ -3842,211 +3842,6 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
Register receiver = r0;
Register scratch = r1;
int argc = arguments_count();
if (argc == 0) {
// Nothing to do, just return the length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ Ret();
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
Register elements = r6;
Register end_elements = r5;
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
}
// Get the array's length into scratch and calculate new length.
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ add(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(scratch, r4);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
// Store the value.
// We may need a register containing the address end_elements below, so
// write back the value in end_elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
} else {
// Check if we could survive without allocation.
__ cmp(scratch, r4);
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
&call_builtin, argc * kDoubleSize);
}
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r9, ip);
__ b(eq, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ ldr(r2, FieldMemOperand(receiver, origin_offset));
__ ldr(ip, FieldMemOperand(r3, HeapObject::kMapOffset));
__ cmp(r2, ip);
__ b(ne, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ ldr(r3, FieldMemOperand(r3, target_offset));
__ mov(r2, receiver);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
}
// Save new length.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below, so write
// back the value in end_elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&attempt_to_grow_elements);
// scratch: array's length + 1.
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
if (IsFastSmiElementsKind(elements_kind())) {
__ JumpIfNotSmi(r2, &call_builtin);
}
// We could be lucky and the elements array could be at the top of new-space.
// In this case we can just grow it in place by moving the allocation pointer
// up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top and check if it is the end of elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r4, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
__ mov(r9, Operand(new_space_allocation_limit));
__ ldr(r9, MemOperand(r9));
__ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
__ cmp(r3, r9);
__ b(hi, &call_builtin);
// We fit and could grow elements.
// Update new_space_allocation_top.
__ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ str(r3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
__ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ Drop(argc + 1);
__ mov(r0, scratch);
__ Ret();
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left

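The grow-in-place fast path each port implements by hand (see the "We could be lucky and the elements array could be at the top of new-space" comment above) is the classic bump-pointer trick: if the elements backing store is the newest object in new-space, the stub extends it by kAllocationDelta words just by moving the allocation top. A minimal sketch of that check in JavaScript, with addresses modeled as plain integers and all names illustrative rather than V8 API:

// Sketch, assuming a new-space described by a top and limit pointer; returns
// true when the backing store could be extended in place.
function tryGrowInPlace(space, elementsEnd, deltaBytes) {
  if (elementsEnd !== space.top) return false;            // not the last allocation
  if (space.top + deltaBytes > space.limit) return false; // new-space is full
  space.top += deltaBytes;                                // claim the extra words
  return true;
}

var space = { top: 0x10020, limit: 0x20000 };
// kAllocationDelta is 4 pointers; assume 4-byte pointers as on ARM.
tryGrowInPlace(space, 0x10020, 4 * 4);  // => true; space.top is now 0x10030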

@@ -4405,215 +4405,6 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
Register receiver = x0;
int argc = arguments_count();
if (argc == 0) {
// Nothing to do, just return the length.
__ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Drop(argc + 1);
__ Ret();
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
Register elements_length = x8;
Register length = x7;
Register elements = x6;
Register end_elements = x5;
Register value = x4;
// Get the elements array of the object.
__ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
x10,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
}
// Get the array's length and calculate new length.
__ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
__ Add(length, length, Smi::FromInt(argc));
// Check if we could survive without allocation.
__ Ldr(elements_length,
FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Cmp(length, elements_length);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ B(gt, &attempt_to_grow_elements);
// Check if value is a smi.
__ Peek(value, (argc - 1) * kPointerSize);
__ JumpIfNotSmi(value, &with_write_barrier);
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ Add(end_elements, elements,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
} else {
__ B(gt, &call_builtin);
__ Peek(value, (argc - 1) * kPointerSize);
__ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
&call_builtin, argc * kDoubleSize);
}
// Save new length.
__ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Return length.
__ Drop(argc + 1);
__ Mov(x0, length);
__ Ret();
if (IsFastDoubleElementsKind(elements_kind())) {
__ Bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ Bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) {
__ B(&call_builtin);
}
__ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
__ JumpIfHeapNumber(x10, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
__ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ ldr(x11, FieldMemOperand(receiver, origin_offset));
__ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
__ cmp(x11, x12);
__ B(ne, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ Ldr(x10, FieldMemOperand(x10, target_offset));
__ Mov(x11, receiver);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
}
// Save new length.
__ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ Add(end_elements, elements,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
value,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Mov(x0, length);
__ Ret();
__ Bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ B(&call_builtin);
}
Register argument = x2;
__ Peek(argument, (argc - 1) * kPointerSize);
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
if (IsFastSmiElementsKind(elements_kind())) {
__ JumpIfNotSmi(argument, &call_builtin);
}
// We could be lucky and the elements array could be at the top of new-space.
// In this case we can just grow it in place by moving the allocation pointer
// up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
Register allocation_top_addr = x5;
Register allocation_top = x9;
// Load top and check if it is the end of elements.
__ Add(end_elements, elements,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Add(end_elements, end_elements, kEndElementsOffset);
__ Mov(allocation_top_addr, new_space_allocation_top);
__ Ldr(allocation_top, MemOperand(allocation_top_addr));
__ Cmp(end_elements, allocation_top);
__ B(ne, &call_builtin);
__ Mov(x10, new_space_allocation_limit);
__ Ldr(x10, MemOperand(x10));
__ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
__ Cmp(allocation_top, x10);
__ B(hi, &call_builtin);
// We fit and could grow elements.
// Update new_space_allocation_top.
__ Str(allocation_top, MemOperand(allocation_top_addr));
// Push the argument.
__ Str(argument, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
ASSERT(kAllocationDelta == 4);
__ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize));
__ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize));
// Update elements' and array's sizes.
__ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta));
__ Str(elements_length,
FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ Drop(argc + 1);
__ Mov(x0, length);
__ Ret();
__ Bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : left


@@ -468,7 +468,13 @@ function ArrayPush() {
return ObservedArrayPush.apply(this, arguments);
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
// Use SetProperty rather than a direct keyed store to ensure that the store
// site doesn't become poisoned with an elements transition KeyedStoreIC.
//
// TODO(danno): Using %SetProperty is a temporary workaround. The spec says
// that ToObject needs to be called for primitive values (and
// Runtime_SetProperty seems to ignore them).
%SetProperty(this, i+n, %_Arguments(i), 0, kStrictMode);
}
var new_length = n + m;

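The "poisoning" the new comment describes is a property of store-site feedback: a keyed-store site that sees pushes of both Smi and non-Smi values watches its receiver transition elements kinds underneath it, and the KeyedStoreIC there goes polymorphic or generic. A hedged illustration in plain JavaScript (the helper name is invented for the example):

// One shared keyed-store site, exercised with two value shapes.
function storeAt(arr, v) { arr[arr.length] = v; }
storeAt([1, 2], 3);    // smi-only array stays smi-only
storeAt([1, 2], 1.5);  // forces a smi -> double elements transition here
// After both calls, the IC at that store site has seen two elements kinds;
// routing the store through %SetProperty keeps ArrayPush's site clean.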

@@ -74,7 +74,6 @@ namespace internal {
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
V(ArrayPush) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -1165,30 +1164,6 @@ class BinaryOpICStub : public HydrogenCodeStub {
};
class ArrayPushStub: public PlatformCodeStub {
public:
ArrayPushStub(ElementsKind kind, int argc) {
bit_field_ = ElementsKindBits::encode(kind) | ArgcBits::encode(argc);
}
void Generate(MacroAssembler* masm);
private:
int arguments_count() { return ArgcBits::decode(bit_field_); }
ElementsKind elements_kind() {
return ElementsKindBits::decode(bit_field_);
}
virtual CodeStub::Major MajorKey() { return ArrayPush; }
virtual int MinorKey() { return bit_field_; }
class ElementsKindBits: public BitField<ElementsKind, 0, 3> {};
class ArgcBits: public BitField<int, 3, Code::kArgumentsBits> {};
int bit_field_;
};
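For reference, the MinorKey of the removed stub is just the two bit fields above ORed together. A hedged sketch of the packing (the FAST_ELEMENTS value of 2 reflects this era's ElementsKind ordering and is shown for illustration only):

// ElementsKindBits occupy bits 0-2, ArgcBits start at bit 3, matching the
// BitField declarations in the removed class.
function encodeMinorKey(elementsKind, argc) {
  return (elementsKind & 0x7) | (argc << 3);
}
encodeMinorKey(2 /* FAST_ELEMENTS, assumed */, 1);  // => 10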
// TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
// call support for stubs in Hydrogen.
class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {


@@ -7696,6 +7696,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
ElementsKind elements_kind = receiver_map->elements_kind();
if (!IsFastElementsKind(elements_kind)) return false;
if (receiver_map->is_observed()) return false;
ASSERT(receiver_map->is_extensible());
Drop(expr->arguments()->length());
HValue* result;
@@ -7758,6 +7760,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
ElementsKind elements_kind = receiver_map->elements_kind();
if (!IsFastElementsKind(elements_kind)) return false;
if (receiver_map->is_observed()) return false;
ASSERT(receiver_map->is_extensible());
// If there may be elements accessors in the prototype chain, the fast
// inlined version can't be used.
@@ -7770,31 +7774,29 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
BuildCheckPrototypeMaps(prototype, Handle<JSObject>());
HValue* op_vals[] = {
context(),
// Receiver.
environment()->ExpressionStackAt(expr->arguments()->length())
};
const int argc = expr->arguments()->length();
// Includes receiver.
PushArgumentsFromEnvironment(argc + 1);
if (argc != 1) return false;
CallInterfaceDescriptor* descriptor =
isolate()->call_descriptor(Isolate::CallHandler);
HValue* value_to_push = Pop();
HValue* array = Pop();
ArrayPushStub stub(receiver_map->elements_kind(), argc);
Handle<Code> code = stub.GetCode(isolate());
HConstant* code_value = Add<HConstant>(code);
HValue* length = Add<HLoadNamedField>(array, static_cast<HValue*>(NULL),
HObjectAccess::ForArrayLength(elements_kind));
ASSERT((sizeof(op_vals) / kPointerSize) ==
descriptor->environment_length());
{
NoObservableSideEffectsScope scope(this);
HInstruction* call = New<HCallWithDescriptor>(
code_value, argc + 1, descriptor,
Vector<HValue*>(op_vals, descriptor->environment_length()));
bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
BuildUncheckedMonomorphicElementAccess(array, length,
value_to_push, is_array,
elements_kind, STORE,
NEVER_RETURN_HOLE,
STORE_AND_GROW_NO_TRANSITION);
}
HInstruction* new_size = NewUncasted<HAdd>(length, Add<HConstant>(argc));
Drop(1); // Drop function.
ast_context()->ReturnInstruction(call, expr->id());
ast_context()->ReturnInstruction(new_size, expr->id());
return true;
}
default:

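The net effect of this hunk: a one-argument, monomorphic Array.push on a fast-elements array is now built as an inlined element store with STORE_AND_GROW_NO_TRANSITION plus an HAdd for the returned length, instead of a call out to the platform stub. A hedged sketch of a call site that takes the new path (requires --allow-natives-syntax):

// Single argument, stable receiver map, fast non-observed elements.
function pushOne(a, v) { return a.push(v); }
pushOne([1, 2], 3);   // warm up with monomorphic feedback
pushOne([1, 2], 3);
%OptimizeFunctionOnNextCall(pushOne);
pushOne([1, 2], 3);   // optimized code inlines the grow-store and
                      // returns length + 1 via the HAdd above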

@@ -3744,198 +3744,6 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
int argc = arguments_count();
if (argc == 0) {
// Noop, return the length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
isolate->factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
}
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(eax, Immediate(Smi::FromInt(argc)));
// Get the elements' length into ecx.
__ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(eax, ecx);
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ JumpIfNotSmi(ecx, &with_write_barrier);
// Store the value.
__ mov(FieldOperand(edi, eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
ecx);
} else {
__ j(greater, &call_builtin);
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ StoreNumberToDoubleElements(
ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
}
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
isolate->factory()->heap_number_map());
__ j(equal, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ mov(edi, FieldOperand(ebx, origin_offset));
__ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ mov(ebx, FieldOperand(ebx, target_offset));
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
// Restore edi used as a scratch register for the write barrier used while
// setting the map.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
}
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Store the value.
__ lea(edx, FieldOperand(edi, eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
__ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ mov(ebx, Operand(esp, argc * kPointerSize));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
if (IsFastSmiElementsKind(elements_kind())) {
__ JumpIfNotSmi(ebx, &call_builtin);
}
// We could be lucky and the elements array could be at the top of new-space.
// In this case we can just grow it in place by moving the allocation pointer
// up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top.
__ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
// Check if it's the end of elements.
__ lea(edx, FieldOperand(edi, eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmp(edx, ecx);
__ j(not_equal, &call_builtin);
__ add(ecx, Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Push the argument...
__ mov(Operand(edx, 0), ebx);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
isolate->factory()->the_hole_value());
}
if (IsFastObjectElementsKind(elements_kind())) {
// We know the elements array is in new space so we don't need the
// remembered set, but we just pushed a value onto it so we may have to tell
// the incremental marker to rescan the object that we just grew. We don't
// need to worry about the holes because they are in old space and already
// marked black.
__ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
}
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment elements' and array's sizes.
__ add(FieldOperand(edi, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
// NOTE: This only happens in new-space, where we don't care about the
// black-byte-count on pages. Otherwise we should update that too if the
// object is black.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : left


@@ -87,6 +87,10 @@ const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
# Strict mode flags for passing to %SetProperty
const kSloppyMode = 0;
const kStrictMode = 1;
# Native cache ids.
const STRING_TO_REGEXP_CACHE_ID = 0;


@@ -4008,206 +4008,6 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
Register receiver = a0;
Register scratch = a1;
int argc = arguments_count();
if (argc == 0) {
// Nothing to do, just return the length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ DropAndRet(argc + 1);
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
Register elements = t2;
Register end_elements = t1;
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
scratch,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
}
// Get the array's length into scratch and calculate new length.
__ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check if we could survive without allocation.
__ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
// Check if value is a smi.
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, kEndElementsOffset);
__ sw(t0, MemOperand(end_elements));
} else {
// Check if we could survive without allocation.
__ Branch(&call_builtin, gt, scratch, Operand(t0));
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2,
&call_builtin, argc * kDoubleSize);
}
// Save new length.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&call_builtin, eq, t3, Operand(at));
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ lw(a2, FieldMemOperand(receiver, origin_offset));
__ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset));
__ Branch(&call_builtin, ne, a2, Operand(at));
const int target_offset = header_size + target_kind * kPointerSize;
__ lw(a3, FieldMemOperand(a3, target_offset));
__ mov(a2, receiver);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
}
// Save new length.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below, so write
// back the value in end_elements.
__ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, kEndElementsOffset);
__ sw(t0, MemOperand(end_elements));
__ RecordWrite(elements,
end_elements,
t0,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&attempt_to_grow_elements);
// scratch: array's length + 1.
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
if (IsFastSmiElementsKind(elements_kind())) {
__ JumpIfNotSmi(a2, &call_builtin);
}
// We could be lucky and the elements array could be at the top of new-space.
// In this case we can just grow it in place by moving the allocation pointer
// up.
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top and check if it is the end of elements.
__ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
__ li(t0, Operand(new_space_allocation_top));
__ lw(a3, MemOperand(t0));
__ Branch(&call_builtin, ne, a3, Operand(end_elements));
__ li(t3, Operand(new_space_allocation_limit));
__ lw(t3, MemOperand(t3));
__ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
__ Branch(&call_builtin, hi, a3, Operand(t3));
// We fit and could grow elements.
// Update new_space_allocation_top.
__ sw(a3, MemOperand(t0));
// Push the argument.
__ sw(a2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ sw(a3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
__ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a1 : left


@@ -3620,202 +3620,6 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ArrayPushStub::Generate(MacroAssembler* masm) {
int argc = arguments_count();
StackArgumentsAccessor args(rsp, argc);
if (argc == 0) {
// Noop, return the length.
__ movp(rax, FieldOperand(rdx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
return;
}
Isolate* isolate = masm->isolate();
if (argc != 1) {
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
Label call_builtin, attempt_to_grow_elements, with_write_barrier;
// Get the elements array of the object.
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
isolate->factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
}
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
__ addl(rax, Immediate(argc));
// Get the elements' length into rcx.
__ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmpl(rax, rcx);
if (IsFastSmiOrObjectElementsKind(elements_kind())) {
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ movp(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Store the value.
__ movp(FieldOperand(rdi,
rax,
times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize),
rcx);
} else {
__ j(greater, &call_builtin);
__ movp(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
}
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
if (IsFastDoubleElementsKind(elements_kind())) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ bind(&with_write_barrier);
if (IsFastSmiElementsKind(elements_kind())) {
if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
isolate->factory()->heap_number_map());
__ j(equal, &call_builtin);
ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
__ movp(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
__ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movp(rbx, ContextOperand(rbx, Context::JS_ARRAY_MAPS_INDEX));
const int header_size = FixedArrayBase::kHeaderSize;
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
__ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ movp(rbx, FieldOperand(rbx, target_offset));
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, DONT_TRACK_ALLOCATION_SITE, NULL);
__ movp(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
}
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
__ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movp(Operand(rdx, 0), rcx);
__ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
if (!FLAG_inline_new) {
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
return;
}
__ movp(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case the
// new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(rbx, &no_fast_elements_check);
__ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate);
const int kAllocationDelta = 4;
ASSERT(kAllocationDelta >= argc);
// Load top.
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
__ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpp(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
__ cmpp(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
__ Store(new_space_allocation_top, rcx);
// Push the argument...
__ movp(Operand(rdx, 0), rbx);
// ... and fill the rest with holes.
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movp(Operand(rdx, i * kPointerSize), kScratchRegister);
}
if (IsFastObjectElementsKind(elements_kind())) {
// We know the elements array is in new space so we don't need the
// remembered set, but we just pushed a value onto it so we may have to tell
// the incremental marker to rescan the object that we just grew. We don't
// need to worry about the holes because they are in old space and already
// marked black.
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
}
// Restore receiver to rdx as finish sequence assumes it's here.
__ movp(rdx, args.GetReceiverOperand());
// Increment elements' and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movp(FieldOperand(rdx, JSArray::kLengthOffset), rax);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : left


@@ -0,0 +1,22 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
function push_wrapper(array, value) {
array.push(value);
}
// Test that optimization of Array.push() for non-Arrays works correctly.
var object = { x : 8, length: 3 };
object[18] = 5;
object.__proto__ = Array.prototype;
push_wrapper(object, 1);
push_wrapper(object, 1);
assertEquals(5, object.length);
%OptimizeFunctionOnNextCall(push_wrapper);
push_wrapper(object, 1);
push_wrapper(object, 1);
assertEquals(8, object.x);
assertEquals(7, object.length);

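The behavior this test exercises is plain ECMAScript: Array.prototype.push works on any receiver with a length property, reading length, storing arguments at the following indices, and writing length back. A hedged mini-example of the same bookkeeping:

var o = { length: 3 };
Array.prototype.push.call(o, "a", "b");
// o[3] === "a", o[4] === "b", o.length === 5; pre-existing properties such
// as object[18] above are never scanned or touched by push.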

@@ -0,0 +1,59 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
var v = 0;
function push_wrapper(array, value) {
array.push(value);
}
function pop_wrapper(array) {
return array.pop();
}
// Test that Object.observe() notification events are properly sent from
// Array.push() and Array.pop(), both from optimized and unoptimized code.
var array = [];
function somethingChanged(changes) {
v++;
}
Object.observe(array, somethingChanged);
push_wrapper(array, 1);
%RunMicrotasks();
assertEquals(1, array.length);
assertEquals(1, v);
push_wrapper(array, 1);
%RunMicrotasks();
assertEquals(2, array.length);
assertEquals(2, v);
%OptimizeFunctionOnNextCall(push_wrapper);
push_wrapper(array, 1);
%RunMicrotasks();
assertEquals(3, array.length);
assertEquals(3, v);
push_wrapper(array, 1);
%RunMicrotasks();
assertEquals(4, array.length);
assertEquals(4, v);
pop_wrapper(array);
%RunMicrotasks();
assertEquals(3, array.length);
assertEquals(5, v);
pop_wrapper(array);
%RunMicrotasks();
assertEquals(2, array.length);
assertEquals(6, v);
%OptimizeFunctionOnNextCall(pop_wrapper);
pop_wrapper(array);
%RunMicrotasks();
assertEquals(1, array.length);
assertEquals(7, v);
pop_wrapper(array);
%RunMicrotasks();
assertEquals(0, array.length);
assertEquals(8, v);

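The %RunMicrotasks() calls matter because Object.observe delivers change records asynchronously: each push or pop queues a record, and the observer callback only runs when the microtask queue is flushed. A hedged mini-example of that delivery model:

var records = [];
var arr = [];
Object.observe(arr, function(changes) { records.push(changes.length); });
arr.push(1);               // queues a change record; the callback has not run
// records.length === 0 here; flushing microtasks (%RunMicrotasks() with
// --allow-natives-syntax, or returning to the event loop) delivers it.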

@@ -0,0 +1,37 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
function push_wrapper(array, value) {
array.push(value);
}
function pop_wrapper(array) {
return array.pop();
}
// Test that frozen arrays throw an exception if you try to push to them, both in
// optimized and non-optimized code.
var array = [2, 2];
Object.freeze(array);
try { push_wrapper(array, 1); } catch (e) {}
assertEquals(2, array.length);
try { push_wrapper(array, 1); } catch (e) {}
assertEquals(2, array.length);
%OptimizeFunctionOnNextCall(push_wrapper);
try { push_wrapper(array, 1); } catch (e) {}
assertEquals(2, array.length);
try { push_wrapper(array, 1); } catch (e) {}
assertEquals(2, array.length);
try { pop_wrapper(array); } catch (e) {}
assertEquals(2, array.length);
try { pop_wrapper(array); } catch (e) {}
assertEquals(2, array.length);
%OptimizeFunctionOnNextCall(pop_wrapper);
try { pop_wrapper(array); } catch (e) {}
assertEquals(2, array.length);
try { pop_wrapper(array); } catch (e) {}
assertEquals(2, array.length);
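The try/catch wrappers are needed because push uses a throwing store: on a frozen array the length property is non-writable, so Array.prototype.push raises a TypeError even from sloppy-mode callers. A hedged mini-example:

var frozen = Object.freeze([2, 2]);
try {
  frozen.push(3);          // throws TypeError; the array cannot grow
} catch (e) {
  // frozen is unchanged: still [2, 2] with length 2
}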