// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"
|
|
|
|
|
2013-06-28 15:34:48 +00:00
|
|
|
#if V8_TARGET_ARCH_IA32
|
2010-05-17 15:41:35 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "bootstrapper.h"
|
2011-04-07 14:42:37 +00:00
|
|
|
#include "codegen.h"
|
2013-07-03 15:39:18 +00:00
|
|
|
#include "cpu-profiler.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "debug.h"
|
2013-10-31 11:43:23 +00:00
|
|
|
#include "isolate-inl.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "runtime.h"
|
|
|
|
#include "serialize.h"
|
|
|
|
|
2009-05-25 10:05:56 +00:00
|
|
|
namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MacroAssembler implementation.

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    // TODO(titzer): should we just use a null handle here instead?
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8()) {
    movsx_b(dst, src);
  } else if (r.IsUInteger8()) {
    movzx_b(dst, src);
  } else if (r.IsInteger16()) {
    movsx_w(dst, src);
  } else if (r.IsUInteger16()) {
    movzx_w(dst, src);
  } else {
    mov(dst, src);
  }
}


void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
  ASSERT(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    mov_b(dst, src);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    mov_w(dst, src);
  } else {
    mov(dst, src);
  }
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
    Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, value);
    return;
  }
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(destination, Immediate(index));
  mov(destination, Operand::StaticArray(destination,
                                        times_pointer_size,
                                        roots_array_start));
}


void MacroAssembler::StoreRoot(Register source,
                               Register scratch,
                               Heap::RootListIndex index) {
  ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
      source);
}


void MacroAssembler::CompareRoot(Register with,
                                 Register scratch,
                                 Heap::RootListIndex index) {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  mov(scratch, Immediate(index));
  cmp(with, Operand::StaticArray(scratch,
                                 times_pointer_size,
                                 roots_array_start));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}


void MacroAssembler::CompareRoot(const Operand& with,
                                 Heap::RootListIndex index) {
  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
  cmp(with, value);
}


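// InNewSpace tests whether |object| lies in the young generation. Pages are
// power-of-two aligned, so masking the tagged pointer with
// ~Page::kPageAlignmentMask yields the page header, whose flags byte records
// whether the page belongs to the from- or to-space.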
void MacroAssembler::InNewSpace(
    Register object,
    Register scratch,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == equal || cc == not_equal);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  // Check that we can use a test_b.
  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
           | (1 << MemoryChunk::IN_TO_SPACE);
  // If non-zero, the page belongs to new-space.
  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
         static_cast<uint8_t>(mask));
  j(cc, condition_met, condition_met_distance);
}


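// RememberedSetHelper records |addr| (a slot that now holds a pointer into
// new space) in the store buffer. If the overflow bit of the bumped top
// pointer is set, the StoreBufferOverflowStub is called to handle the full
// buffer before continuing or returning.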
void MacroAssembler::RememberedSetHelper(
    Register object,  // Only used for debug checks.
    Register addr,
    Register scratch,
    SaveFPRegsMode save_fp,
    MacroAssembler::RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
    int3();
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(scratch, Operand::StaticVariable(store_buffer));
  // Store pointer to buffer.
  mov(Operand(scratch, 0), addr);
  // Increment buffer top.
  add(scratch, Immediate(kPointerSize));
  // Write back new top of buffer.
  mov(Operand::StaticVariable(store_buffer), scratch);
  // Call stub on end of buffer.
  // Check for end of buffer.
  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kReturnAtEnd) {
    Label buffer_overflowed;
    j(not_equal, &buffer_overflowed, Label::kNear);
    ret(0);
    bind(&buffer_overflowed);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    j(equal, &done, Label::kNear);
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(save_fp);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
    ret(0);
  } else {
    ASSERT(and_then == kFallThroughAtEnd);
    bind(&done);
  }
}


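// Clamps the double in |input_reg| to [0, 255], leaving the result as an
// integer in |result_reg|. In-range values convert directly. Out-of-range
// values use a branchless setcc/sub/and sequence keyed off the comparison
// with 0x80000000: negative inputs yield 0 and inputs above 255 yield 255.
// NaN falls into the conversion-failure path and yields 0.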
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
                                        XMMRegister scratch_reg,
                                        Register result_reg) {
  Label done;
  Label conv_failure;
  pxor(scratch_reg, scratch_reg);
  cvtsd2si(result_reg, input_reg);
  test(result_reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  cmp(result_reg, Immediate(0x80000000));
  j(equal, &conv_failure, Label::kNear);
  mov(result_reg, Immediate(0));
  setcc(above, result_reg);
  sub(result_reg, Immediate(1));
  and_(result_reg, Immediate(255));
  jmp(&done, Label::kNear);
  bind(&conv_failure);
  Set(result_reg, Immediate(0));
  ucomisd(input_reg, scratch_reg);
  j(below, &done, Label::kNear);
  Set(result_reg, Immediate(255));
  bind(&done);
}


void MacroAssembler::ClampUint8(Register reg) {
  Label done;
  test(reg, Immediate(0xFFFFFF00));
  j(zero, &done, Label::kNear);
  setcc(negative, reg);  // 1 if negative, 0 if positive.
  dec_b(reg);  // 0 if negative, 255 if positive.
  bind(&done);
}


void MacroAssembler::SlowTruncateToI(Register result_reg,
                                     Register input_reg,
                                     int offset) {
  DoubleToIStub stub(input_reg, result_reg, offset, true);
  call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                       XMMRegister input_reg) {
  Label done;
  cvttsd2si(result_reg, Operand(input_reg));
  cmp(result_reg, 0x80000000u);
  j(not_equal, &done, Label::kNear);

  sub(esp, Immediate(kDoubleSize));
  movsd(MemOperand(esp, 0), input_reg);
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
  bind(&done);
}


void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
  sub(esp, Immediate(kDoubleSize));
  fst_d(MemOperand(esp, 0));
  SlowTruncateToI(result_reg, esp, 0);
  add(esp, Immediate(kDoubleSize));
}


void MacroAssembler::X87TOSToI(Register result_reg,
                               MinusZeroMode minus_zero_mode,
                               Label* conversion_failed,
                               Label::Distance dst) {
  Label done;
  sub(esp, Immediate(kPointerSize));
  fld(0);
  fist_s(MemOperand(esp, 0));
  fild_s(MemOperand(esp, 0));
  pop(result_reg);
  FCmp();
  j(not_equal, conversion_failed, dst);
  j(parity_even, conversion_failed, dst);
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    // To check for minus zero, we load the value again as float, and check
    // if that is still 0.
    sub(esp, Immediate(kPointerSize));
    fst_s(MemOperand(esp, 0));
    pop(result_reg);
    test(result_reg, Operand(result_reg));
    j(not_zero, conversion_failed, dst);
  }
  bind(&done);
}


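// Converts the double in |input_reg| to an integer with cvttsd2si, converts
// the result back to a double, and compares it with the original: any
// mismatch (including NaN, which sets the parity flag on ucomisd) means the
// value was not exactly representable as a 32-bit integer, so the
// conversion failed.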
void MacroAssembler::DoubleToI(Register result_reg,
                               XMMRegister input_reg,
                               XMMRegister scratch,
                               MinusZeroMode minus_zero_mode,
                               Label* conversion_failed,
                               Label::Distance dst) {
  ASSERT(!input_reg.is(scratch));
  cvttsd2si(result_reg, Operand(input_reg));
  Cvtsi2sd(scratch, Operand(result_reg));
  ucomisd(scratch, input_reg);
  j(not_equal, conversion_failed, dst);
  j(parity_even, conversion_failed, dst);  // NaN.
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
    Label done;
    // The integer converted back is equal to the original. We
    // only have to test if we got -0 as an input.
    test(result_reg, Operand(result_reg));
    j(not_zero, &done, Label::kNear);
    movmskpd(result_reg, input_reg);
    // Bit 0 contains the sign of the double in input_reg.
    // If input was positive, we are ok and return 0, otherwise
    // jump to conversion_failed.
    and_(result_reg, 1);
    j(not_zero, conversion_failed, dst);
    bind(&done);
  }
}


void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                           Register input_reg) {
  Label done, slow_case;

  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatureScope scope(this, SSE3);
    Label convert;
    // Use more powerful conversion when sse3 is available.
    // Load x87 register with heap number.
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    // Get exponent alone and check for too-big exponent.
    mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
    and_(result_reg, HeapNumber::kExponentMask);
    const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    cmp(Operand(result_reg), Immediate(kTooBigExponent));
    j(greater_equal, &slow_case, Label::kNear);

    // Reserve space for 64 bit answer.
    sub(Operand(esp), Immediate(kDoubleSize));
    // Do conversion, which cannot fail because we checked the exponent.
    fisttp_d(Operand(esp, 0));
    mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
    add(Operand(esp), Immediate(kDoubleSize));
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from fpu stack.
      sub(Operand(esp), Immediate(kDoubleSize));
      fstp_d(Operand(esp, 0));
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      fstp(0);
      SlowTruncateToI(result_reg, input_reg);
    }
  } else if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(this, SSE2);
    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    cmp(result_reg, 0x80000000u);
    j(not_equal, &done, Label::kNear);
    // Check if the input was 0x80000000 (kMinInt).
    // If not, the conversion overflowed and we go through the slow case.
    ExternalReference min_int = ExternalReference::address_of_min_int();
    ucomisd(xmm0, Operand::StaticVariable(min_int));
    j(not_equal, &slow_case, Label::kNear);
    j(parity_even, &slow_case, Label::kNear);  // NaN.
    jmp(&done, Label::kNear);

    // Slow case.
    bind(&slow_case);
    if (input_reg.is(result_reg)) {
      // Input is clobbered. Restore number from double scratch.
      sub(esp, Immediate(kDoubleSize));
      movsd(MemOperand(esp, 0), xmm0);
      SlowTruncateToI(result_reg, esp, 0);
      add(esp, Immediate(kDoubleSize));
    } else {
      SlowTruncateToI(result_reg, input_reg);
    }
  } else {
    SlowTruncateToI(result_reg, input_reg);
  }
  bind(&done);
}


void MacroAssembler::TaggedToI(Register result_reg,
                               Register input_reg,
                               XMMRegister temp,
                               MinusZeroMode minus_zero_mode,
                               Label* lost_precision) {
  Label done;
  ASSERT(!temp.is(xmm0));

  cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, lost_precision, Label::kNear);

  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    ASSERT(!temp.is(no_xmm_reg));
    CpuFeatureScope scope(this, SSE2);

    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    cvttsd2si(result_reg, Operand(xmm0));
    Cvtsi2sd(temp, Operand(result_reg));
    ucomisd(xmm0, temp);
    RecordComment("Deferred TaggedToI: lost precision");
    j(not_equal, lost_precision, Label::kNear);
    RecordComment("Deferred TaggedToI: NaN");
    j(parity_even, lost_precision, Label::kNear);
    if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
      test(result_reg, Operand(result_reg));
      j(not_zero, &done, Label::kNear);
      movmskpd(result_reg, xmm0);
      and_(result_reg, 1);
      RecordComment("Deferred TaggedToI: minus zero");
      j(not_zero, lost_precision, Label::kNear);
    }
  } else {
    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
    // should first try a fast conversion and then bailout to this slow case.
    Label lost_precision_pop, zero_check;
    Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
        ? &lost_precision_pop : lost_precision;
    sub(esp, Immediate(kPointerSize));
    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
    fist_s(MemOperand(esp, 0));
    fild_s(MemOperand(esp, 0));
    FCmp();
    pop(result_reg);
    j(not_equal, lost_precision_int, Label::kNear);
    j(parity_even, lost_precision_int, Label::kNear);  // NaN.
    if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
      test(result_reg, Operand(result_reg));
      j(zero, &zero_check, Label::kNear);
      fstp(0);
      jmp(&done, Label::kNear);
      bind(&zero_check);
      // To check for minus zero, we load the value again as float, and check
      // if that is still 0.
      sub(esp, Immediate(kPointerSize));
      fstp_s(Operand(esp, 0));
      pop(result_reg);
      test(result_reg, Operand(result_reg));
      j(zero, &done, Label::kNear);
      jmp(lost_precision, Label::kNear);

      bind(&lost_precision_pop);
      fstp(0);
      jmp(lost_precision, Label::kNear);
    }
  }
  bind(&done);
}


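// Loads an unsigned 32-bit value into an XMM register. Cvtsi2sd treats the
// source as signed, so when the sign bit is set the result is 2^32 too
// small; adding the 2^32 bias stored at address_of_uint32_bias corrects it.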
void MacroAssembler::LoadUint32(XMMRegister dst,
                                Register src,
                                XMMRegister scratch) {
  Label done;
  cmp(src, Immediate(0));
  ExternalReference uint32_bias =
      ExternalReference::address_of_uint32_bias();
  movsd(scratch, Operand::StaticVariable(uint32_bias));
  Cvtsi2sd(dst, src);
  j(not_sign, &done, Label::kNear);
  addsd(dst, scratch);
  bind(&done);
}


void MacroAssembler::LoadUint32NoSSE2(Register src) {
  Label done;
  push(src);
  fild_s(Operand(esp, 0));
  cmp(src, Immediate(0));
  j(not_sign, &done, Label::kNear);
  ExternalReference uint32_bias =
      ExternalReference::address_of_uint32_bias();
  fld_d(Operand::StaticVariable(uint32_bias));
  faddp(1);
  bind(&done);
  add(esp, Immediate(kPointerSize));
}


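// The RecordWrite family implements the write barrier: after a pointer
// store, the page flags of the stored value and of the host object are
// checked, and the RecordWriteStub is called only when the store might need
// to be recorded for the store buffer or for incremental marking.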
void MacroAssembler::RecordWriteArray(Register object,
                                      Register value,
                                      Register index,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    test(value, Immediate(kSmiTagMask));
    j(zero, &done);
  }

  // Array access: calculate the destination address in the same manner as
  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
  // into an array of words.
  Register dst = index;
  lea(dst, Operand(object, index, times_half_pointer_size,
                   FixedArray::kHeaderSize - kHeapObjectTag));

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done, Label::kNear);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    Label ok;
    test_b(dst, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWriteForMap(
    Register object,
    Handle<Map> map,
    Register scratch1,
    Register scratch2,
    SaveFPRegsMode save_fp) {
  Label done;

  Register address = scratch1;
  Register value = scratch2;
  if (emit_debug_code()) {
    Label ok;
    lea(address, FieldOperand(object, HeapObject::kMapOffset));
    test_b(address, (1 << kPointerSizeLog2) - 1);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (!FLAG_incremental_marking) {
    return;
  }

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed
  // that the from object's page's interesting flag is also set.  This
  // optimization relies on the fact that maps can never be in new space.
  ASSERT(!isolate()->heap()->InNewSpace(*map));
  CheckPageFlagForMap(map,
                      MemoryChunk::kPointersToHereAreInterestingMask,
                      zero,
                      &done,
                      Label::kNear);

  // Delay the initialization of |address| and |value| for the stub until it's
  // known that they will be needed. Up until this point their values are not
  // needed since they are embedded in the operands of instructions that need
  // them.
  lea(address, FieldOperand(object, HeapObject::kMapOffset));
  mov(value, Immediate(map));
  RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
    mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
    mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASSERT(!object.is(value));
  ASSERT(!object.is(address));
  ASSERT(!value.is(address));
  AssertNotSmi(object);

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    Label ok;
    cmp(value, Operand(address, 0));
    j(equal, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    // Skip barrier if writing a smi.
    JumpIfSmi(value, &done, Label::kNear);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                zero,
                &done,
                Label::kNear);

  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
  }
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  Set(eax, Immediate(0));
  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif


void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtsi2sd(dst, src);
}


void MacroAssembler::Set(Register dst, const Immediate& x) {
  if (x.is_zero()) {
    xor_(dst, dst);  // Shorter than mov.
  } else {
    mov(dst, x);
  }
}


void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
  mov(dst, x);
}


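// Large immediates are "unsafe" because untrusted values embedded verbatim
// in JIT code could be used to hide executable payloads (JIT spraying).
// SafeSet and SafePush therefore emit such immediates XORed with a random
// jit cookie and undo the XOR at run time.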
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
  static const int kMaxImmediateBits = 17;
  if (!RelocInfo::IsNone(x.rmode_)) return false;
  return !is_intn(x.x_, kMaxImmediateBits);
}


void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    Set(dst, Immediate(x.x_ ^ jit_cookie()));
    xor_(dst, jit_cookie());
  } else {
    Set(dst, x);
  }
}


void MacroAssembler::SafePush(const Immediate& x) {
  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
    push(Immediate(x.x_ ^ jit_cookie()));
    xor_(Operand(esp, 0), Immediate(jit_cookie()));
  } else {
    push(x);
  }
}


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}


void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       static_cast<int8_t>(type));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Label* fail,
                                       Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Label* fail,
                                             Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(below_equal, fail, distance);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleyElementValue);
  j(above, fail, distance);
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Label* fail,
                                          Label::Distance distance) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  cmpb(FieldOperand(map, Map::kBitField2Offset),
       Map::kMaximumBitField2FastHoleySmiElementValue);
  j(above, fail, distance);
}


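// Stores a number (smi or heap number) into a FixedDoubleArray element.
// NaNs are canonicalized to a single bit pattern on the way in, so that the
// hole NaN used to mark missing elements can never be produced by a stored
// user value.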
void MacroAssembler::StoreNumberToDoubleElements(
    Register maybe_number,
    Register elements,
    Register key,
    Register scratch1,
    XMMRegister scratch2,
    Label* fail,
    bool specialize_for_processor,
    int elements_offset) {
  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);

  CheckMap(maybe_number,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Double value, canonicalize NaN.
  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
  cmp(FieldOperand(maybe_number, offset),
      Immediate(kNaNOrInfinityLowerBoundUpper32));
  j(greater_equal, &maybe_nan, Label::kNear);

  bind(&not_nan);
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
    CpuFeatureScope use_sse2(this, SSE2);
    movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
    bind(&have_double_value);
    movsd(FieldOperand(elements, key, times_4,
                       FixedDoubleArray::kHeaderSize - elements_offset),
          scratch2);
  } else {
    fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
    bind(&have_double_value);
    fstp_d(FieldOperand(elements, key, times_4,
                        FixedDoubleArray::kHeaderSize - elements_offset));
  }
  jmp(&done);

  bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  j(greater, &is_nan, Label::kNear);
  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
  j(zero, &not_nan);
  bind(&is_nan);
  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
    CpuFeatureScope use_sse2(this, SSE2);
    movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
  } else {
    fld_d(Operand::StaticVariable(canonical_nan_reference));
  }
  jmp(&have_double_value, Label::kNear);

  bind(&smi_value);
  // Value is a smi. Convert to a double and store.
  // Preserve original value.
  mov(scratch1, maybe_number);
  SmiUntag(scratch1);
  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
    CpuFeatureScope fscope(this, SSE2);
    Cvtsi2sd(scratch2, scratch1);
    movsd(FieldOperand(elements, key, times_4,
                       FixedDoubleArray::kHeaderSize - elements_offset),
          scratch2);
  } else {
    push(scratch1);
    fild_s(Operand(esp, 0));
    pop(scratch1);
    fstp_d(FieldOperand(elements, key, times_4,
                        FixedDoubleArray::kHeaderSize - elements_offset));
  }
  bind(&done);
}


void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}


void MacroAssembler::CheckMap(Register obj,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, map);
  j(not_equal, fail);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Register unused,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
  j(equal, success);

  bind(&fail);
}


Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  test(instance_type, Immediate(kIsNotStringMask));
  return zero;
}


Condition MacroAssembler::IsObjectNameType(Register heap_object,
                                           Register map,
                                           Register instance_type) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
  return below_equal;
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  cmp(scratch,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  j(above, fail);
}


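// Compares the two values on top of the x87 stack and pops both. With CMOV
// support, fucomip sets EFLAGS directly; otherwise the FPU status word is
// copied into AH with fnstsw and transferred to EFLAGS with sahf.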
void MacroAssembler::FCmp() {
  if (CpuFeatures::IsSupported(CMOV)) {
    fucomip();
    fstp(0);
  } else {
    fucompp();
    push(eax);
    fnstsw_ax();
    sahf();
    pop(eax);
  }
}


void MacroAssembler::AssertNumber(Register object) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfSmi(object, &ok);
    cmp(FieldOperand(object, HeapObject::kMapOffset),
        isolate()->factory()->heap_number_map());
    Check(equal, kOperandNotANumber);
    bind(&ok);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, kOperandIsNotASmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAString);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(below, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmiAndNotAName);
    push(object);
    mov(object, FieldOperand(object, HeapObject::kMapOffset));
    CmpInstanceType(object, LAST_NAME_TYPE);
    pop(object);
    Check(below_equal, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, kOperandIsASmi);
  }
}


void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
  if (frame_mode == BUILD_STUB_FRAME) {
    push(ebp);  // Caller's frame pointer.
    mov(ebp, esp);
    push(esi);  // Callee's context.
    push(Immediate(Smi::FromInt(StackFrame::STUB)));
  } else {
    PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    if (isolate()->IsCodePreAgingActive()) {
      // Pre-age the code.
      call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
           RelocInfo::CODE_AGE_SEQUENCE);
      Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
    } else {
      push(ebp);  // Caller's frame pointer.
      mov(ebp, esp);
      push(esi);  // Callee's context.
      push(edi);  // Callee's JS function.
    }
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, esp);
  push(esi);
  push(Immediate(Smi::FromInt(type)));
  push(Immediate(CodeObject()));
  if (emit_debug_code()) {
    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
    Check(not_equal, kCodeObjectNotProperlyPatched);
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
        Immediate(Smi::FromInt(type)));
    Check(equal, kStackFrameTypesMustMatch);
  }
  leave();
}


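// Exit frames mark the transition from JavaScript to C++. The prologue
// below saves the frame pointer and the context in isolate-level globals so
// that the stack can still be walked while C++ code is running.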
void MacroAssembler::EnterExitFramePrologue() {
  // Set up the frame structure on the stack.
  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
  push(ebp);
  mov(ebp, esp);

  // Reserve room for entry stack pointer and push the code object.
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before call.
  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  ExternalReference context_address(Isolate::kContextAddress,
                                    isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
  mov(Operand::StaticVariable(context_address), esi);
}


void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save all XMM registers.
  if (save_doubles) {
    CpuFeatureScope scope(this, SSE2);
    int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
    sub(esp, Immediate(space));
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else {
    sub(esp, Immediate(argc * kPointerSize));
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    ASSERT(IsPowerOf2(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}


void MacroAssembler::EnterExitFrame(bool save_doubles) {
  EnterExitFramePrologue();

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_4, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(3, save_doubles);
}


void MacroAssembler::EnterApiExitFrame(int argc) {
  EnterExitFramePrologue();
  EnterExitFrameEpilogue(argc, false);
}


void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Optionally restore all XMM registers.
  if (save_doubles) {
    CpuFeatureScope scope(this, SSE2);
    const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  // Get the return address from the stack and restore the frame pointer.
  mov(ecx, Operand(ebp, 1 * kPointerSize));
  mov(ebp, Operand(ebp, 0 * kPointerSize));

  // Pop the arguments and the receiver from the caller stack.
  lea(esp, Operand(esi, 1 * kPointerSize));

  // Push the return address to get ready to return.
  push(ecx);

  LeaveExitFrameEpilogue(true);
}


void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address(Isolate::kContextAddress, isolate());
  if (restore_context) {
    mov(esi, Operand::StaticVariable(context_address));
  }
#ifdef DEBUG
  mov(Operand::StaticVariable(context_address), Immediate(0));
#endif

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
                                       isolate());
  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}


void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
  mov(esp, ebp);
  pop(ebp);

  LeaveExitFrameEpilogue(restore_context);
}


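// Stack handlers form a linked list threaded through the stack itself:
// Isolate::kHandlerAddress points at the innermost handler, and each
// handler's kNextOffset slot points at the one outside it. PushTryHandler
// pushes a new link; Throw and ThrowUncatchable unwind along the chain.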
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
|
2011-11-11 13:48:14 +00:00
|
|
|
int handler_index) {
|
2009-06-08 14:39:50 +00:00
|
|
|
// Adjust this code if not the case.
|
2011-08-12 10:52:49 +00:00
|
|
|
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
|
|
|
|
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
|
2011-11-11 13:48:14 +00:00
|
|
|
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
|
|
|
|
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
|
|
|
|
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
|
|
|
|
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
|
|
|
|
|
|
|
|
// We will build up the handler from the bottom by pushing on the stack.
|
2012-02-09 09:43:37 +00:00
|
|
|
// First push the frame pointer and context.
|
|
|
|
if (kind == StackHandler::JS_ENTRY) {
|
2011-11-11 13:48:14 +00:00
|
|
|
// The frame pointer does not point to a JS frame so we save NULL for
|
|
|
|
// ebp. We expect the code throwing an exception to check ebp before
|
|
|
|
// dereferencing it to restore the context.
|
2009-06-10 09:00:07 +00:00
|
|
|
push(Immediate(0)); // NULL frame pointer.
|
2011-08-12 10:52:49 +00:00
|
|
|
push(Immediate(Smi::FromInt(0))); // No context.
|
2012-02-09 09:43:37 +00:00
|
|
|
} else {
|
|
|
|
push(ebp);
|
|
|
|
push(esi);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
2011-11-11 13:48:14 +00:00
|
|
|
// Push the state and the code object.
|
2012-02-09 09:43:37 +00:00
|
|
|
unsigned state =
|
|
|
|
StackHandler::IndexField::encode(handler_index) |
|
|
|
|
StackHandler::KindField::encode(kind);
|
2011-11-11 13:48:14 +00:00
|
|
|
push(Immediate(state));
|
2011-12-06 12:11:08 +00:00
|
|
|
Push(CodeObject());
|
2011-11-11 13:48:14 +00:00
|
|
|
|
|
|
|
// Link the current handler as the next handler.
|
|
|
|
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
|
|
|
|
push(Operand::StaticVariable(handler_address));
|
|
|
|
// Set this new handler as the current one.
|
|
|
|
mov(Operand::StaticVariable(handler_address), esp);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-12-10 14:06:08 +00:00
|
|
|
void MacroAssembler::PopTryHandler() {
|
2011-08-12 10:52:49 +00:00
|
|
|
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
|
2011-11-11 13:48:14 +00:00
|
|
|
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
|
|
|
|
pop(Operand::StaticVariable(handler_address));
|
2011-10-03 11:44:39 +00:00
|
|
|
add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
|
2009-12-10 14:06:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-11-11 13:48:14 +00:00
|
|
|
void MacroAssembler::JumpToHandlerEntry() {
|
|
|
|
// Compute the handler entry address and jump to it. The handler table is
|
|
|
|
// a fixed array of (smi-tagged) code offsets.
|
|
|
|
// eax = exception, edi = code object, edx = state.
|
|
|
|
mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
|
|
|
|
shr(edx, StackHandler::kKindWidth);
|
|
|
|
mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
|
|
|
|
SmiUntag(edx);
|
|
|
|
lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
|
|
|
|
jmp(edi);
|
|
|
|
}


void MacroAssembler::Throw(Register value) {
  // Adjust this code if the handler layout asserted below changes.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in eax.
  if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));
  // Restore the next handler.
  pop(Operand::StaticVariable(handler_address));

  // Remove the code object and state, compute the handler address in edi.
  pop(edi);  // Code object.
  pop(edx);  // Index and state.

  // Restore the context and frame pointer.
  pop(esi);  // Context.
  pop(ebp);  // Frame pointer.
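  // For a JS_ENTRY handler these pops restore the NULL frame pointer and
  // Smi(0) context sentinels stored by PushTryHandler, which is what the
  // test below relies on.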

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
  // ebp or esi.
  Label skip;
  test(esi, esi);
  j(zero, &skip, Label::kNear);
  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
  bind(&skip);

  JumpToHandlerEntry();
}


void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if the handler layout asserted below changes.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in eax.
  if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));

  // Unwind the handlers until the top ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind, Label::kNear);
  bind(&fetch_next);
  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  test(Operand(esp, StackHandlerConstants::kStateOffset),
       Immediate(StackHandler::KindField::kMask));
  j(not_zero, &fetch_next);
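  // The loop walks the handler chain by resetting esp to each saved next
  // pointer; it exits with esp at the top JS_ENTRY handler, whose kind
  // bits are zero.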

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(Operand::StaticVariable(handler_address));

  // Remove the code object and state, compute the handler address in edi.
  pop(edi);  // Code object.
  pop(edx);  // Index and state.

  // Clear the context pointer and frame pointer (0 was saved in the handler).
  pop(esi);
  pop(ebp);

  JumpToHandlerEntry();
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch1));
  ASSERT(!holder_reg.is(scratch2));
  ASSERT(!scratch1.is(scratch2));

  // Load current lexical context from the stack frame.
  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (emit_debug_code()) {
    cmp(scratch1, Immediate(0));
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
  }
  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  mov(scratch1, FieldOperand(scratch1, offset));
  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  // Check if both contexts are the same.
  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens: check that the security token in the calling
  // global object is compatible with the security token in the receiving
  // global object.
  mov(scratch2,
      FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    cmp(scratch2, isolate()->factory()->null_value());
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);

    // Read the first word and compare to native_context_map.
    cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
        isolate()->factory()->native_context_map());
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
  }

  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
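  // Each native context stores its security token at SECURITY_TOKEN_INDEX;
  // if the two tokens differ, the access check fails and we jump to miss.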
  mov(scratch1, FieldOperand(scratch1, token_offset));
  cmp(scratch1, FieldOperand(scratch2, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// Note: r0 will contain the hash code on return.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
  // Xor original key with a seed.
  if (Serializer::enabled()) {
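    // While building a snapshot the hash seed is not a compile-time
    // constant, so it is loaded from the roots array at runtime instead of
    // being embedded as an immediate.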
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
    mov(scratch,
        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
    SmiUntag(scratch);
    xor_(r0, scratch);
  } else {
    int32_t seed = isolate()->heap()->HashSeed();
    xor_(r0, Immediate(seed));
  }

  // hash = ~hash + (hash << 15);
  mov(scratch, r0);
  not_(r0);
  shl(scratch, 15);
  add(r0, scratch);
  // hash = hash ^ (hash >> 12);
  mov(scratch, r0);
  shr(scratch, 12);
  xor_(r0, scratch);
  // hash = hash + (hash << 2);
  lea(r0, Operand(r0, r0, times_4, 0));
  // hash = hash ^ (hash >> 4);
  mov(scratch, r0);
  shr(scratch, 4);
  xor_(r0, scratch);
  // hash = hash * 2057;
  imul(r0, r0, 2057);
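  // 2057 == 1 + (1 << 3) + (1 << 11), so the multiply computes
  // hash + (hash << 3) + (hash << 11) in a single instruction.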
  // hash = hash ^ (hash >> 16);
  mov(scratch, r0);
  shr(scratch, 16);
  xor_(r0, scratch);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register r0,
                                              Register r1,
                                              Register r2,
                                              Register result) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver and is unchanged.
  //
  // key - holds the smi key on entry and is unchanged.
  //
  // Scratch registers:
  //
  // r0 - holds the untagged key on entry and holds the hash once computed.
  //
  // r1 - used to hold the capacity mask of the dictionary
  //
  // r2 - used for the index into the dictionary.
  //
  // result - holds the result on exit if the load succeeds and we fall through.

  Label done;

  GetNumberHash(r0, r1);

  // Compute capacity mask.
  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
  shr(r1, kSmiTagSize);  // convert smi to int
  dec(r1);

  // Generate an unrolled loop that performs a few probes before giving up.
  const int kProbes = 4;
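  // Only the first four probes are inlined; if none of them hits the key,
  // the final probe jumps to the miss label and the caller falls back to
  // the runtime lookup.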
  for (int i = 0; i < kProbes; i++) {
    // Use r2 for index calculations and keep the hash intact in r0.
    mov(r2, r0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(r2, r1);

    // Scale the index by multiplying by the entry size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3

    // Check if the key matches.
    cmp(key, FieldOperand(elements,
                          r2,
                          times_pointer_size,
                          SeededNumberDictionary::kElementsStartOffset));
    if (i != (kProbes - 1)) {
      j(equal, &done);
    } else {
      j(not_equal, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ASSERT_EQ(NORMAL, 0);
  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
       Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  j(not_zero, miss);

  // Get the value at the masked, scaled index.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    ASSERT(scratch.is(no_reg));
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    cmp(result, Operand::StaticVariable(allocation_top));
    Check(equal, kUnexpectedAllocationTop);
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    mov(result, Operand::StaticVariable(allocation_top));
  } else {
    mov(scratch, Immediate(allocation_top));
    mov(result, Operand(scratch, 0));
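    // scratch now holds the address of the allocation top, so
    // UpdateAllocationTopHelper can store through it without rebuilding
    // the external reference.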
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch,
                                               AllocationFlags flags) {
  if (emit_debug_code()) {
    test(result_end, Immediate(kObjectAlignmentMask));
    Check(zero, kUnalignedAllocationInNewSpace);
  }

  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Update new top. Use scratch if available.
  if (scratch.is(no_reg)) {
    mov(Operand::StaticVariable(allocation_top), result_end);
  } else {
    mov(Operand(scratch, 0), result_end);
  }
}


void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      if (result_end.is_valid()) {
        mov(result_end, Immediate(0x7191));
      }
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
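    // The one-word filler written above keeps the heap iterable across the
    // alignment gap; bumping result by kDoubleSize / 2 (one word) makes the
    // allocation below double aligned.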
  }

  // Calculate new top and bail out if space is exhausted.
  Register top_reg = result_end.is_valid() ? result_end : result;
  if (!top_reg.is(result)) {
    mov(top_reg, result);
  }
  add(top_reg, Immediate(object_size));
  j(carry, gc_required);
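  // A carry here means top + object_size wrapped around the 32-bit address
  // space, so treat it like an exhausted space.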
  cmp(top_reg, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(top_reg, scratch, flags);

  // Tag result if requested.
  bool tag_result = (flags & TAG_OBJECT) != 0;
  if (top_reg.is(result)) {
    if (tag_result) {
      sub(result, Immediate(object_size - kHeapObjectTag));
    } else {
      sub(result, Immediate(object_size));
    }
  } else if (tag_result) {
    ASSERT(kHeapObjectTag == 1);
    inc(result);
  }
}


void MacroAssembler::Allocate(int header_size,
                              ScaleFactor element_size,
                              Register element_count,
                              RegisterValueType element_count_type,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & SIZE_IN_WORDS) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // Register element_count is not modified by the function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  // We assume that element_count*element_size + header_size does not
  // overflow.
  if (element_count_type == REGISTER_VALUE_IS_SMI) {
    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
    ASSERT(element_size >= times_2);
    ASSERT(kSmiTagSize == 1);
    element_size = static_cast<ScaleFactor>(element_size - 1);
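    // A smi is the value shifted left by kSmiTagSize == 1, so halving the
    // scale factor exactly cancels the smi tag shift.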
  } else {
    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
  }
  lea(result_end, Operand(element_count, element_size, header_size));
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  if ((flags & TAG_OBJECT) != 0) {
    ASSERT(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}


void MacroAssembler::Allocate(Register object_size,
                              Register result,
                              Register result_end,
                              Register scratch,
                              Label* gc_required,
                              AllocationFlags flags) {
  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Immediate(0x7091));
      mov(result_end, Immediate(0x7191));
      if (scratch.is_valid()) {
        mov(scratch, Immediate(0x7291));
      }
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
    Label aligned;
    test(result, Immediate(kDoubleAlignmentMask));
    j(zero, &aligned, Label::kNear);
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand::StaticVariable(allocation_limit));
      j(above_equal, gc_required);
    }
    mov(Operand(result, 0),
        Immediate(isolate()->factory()->one_pointer_filler_map()));
    add(result, Immediate(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate new top and bail out if space is exhausted.
  if (!object_size.is(result_end)) {
    mov(result_end, object_size);
  }
  add(result_end, result);
  j(carry, gc_required);
  cmp(result_end, Operand::StaticVariable(allocation_limit));
  j(above, gc_required);

  // Tag result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    ASSERT(kHeapObjectTag == 1);
    inc(result);
  }

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch, flags);
}
|
|
|
|
|
|
|
|
|
|
|
|
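// Undo the most recent new-space allocation: strip the heap-object tag from
// |object| and move the new-space allocation top back down to its address.
// Only valid if |object| really was the last object allocated.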
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
  cmp(object, Operand::StaticVariable(new_space_allocation_top));
  Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
  mov(Operand::StaticVariable(new_space_allocation_top), object);
}

void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required) {
  // Allocate heap number in new space.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->heap_number_map()));
}

void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask.
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate two byte string in new space.
  Allocate(SeqTwoByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, length);
  ASSERT(kCharSize == 1);
  add(scratch1, Immediate(kObjectAlignmentMask));
  and_(scratch1, Immediate(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  Allocate(SeqOneByteString::kHeaderSize,
           times_1,
           scratch1,
           REGISTER_VALUE_IS_INT32,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->ascii_string_map()));
  mov(scratch1, length);
  SmiTag(scratch1);
  mov(FieldOperand(result, String::kLengthOffset), scratch1);
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateAsciiString(Register result,
                                         int length,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* gc_required) {
  ASSERT(length > 0);

  // Allocate ASCII string in new space.
  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
           gc_required, TAG_OBJECT);

  // Set the map, length and hash field.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->ascii_string_map()));
  mov(FieldOperand(result, String::kLengthOffset),
      Immediate(Smi::FromInt(length)));
  mov(FieldOperand(result, String::kHashFieldOffset),
      Immediate(String::kEmptyHashField));
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate cons string in new space.
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_string_map()));
}

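// Unlike the two-byte variant above, the one-byte cons string allocator
// consults the new-space high-promotion mode: when that mode is active,
// the cons string is pretenured directly into old pointer space.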
void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  Label allocate_new_space, install_map;
  AllocationFlags flags = TAG_OBJECT;

  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(isolate());

  test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
  j(zero, &allocate_new_space);

  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
  jmp(&install_map);

  bind(&allocate_new_space);
  Allocate(ConsString::kSize,
           result,
           scratch1,
           scratch2,
           gc_required,
           flags);

  bind(&install_map);
  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->cons_ascii_string_map()));
}

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_string_map()));
}

void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  // Allocate sliced string in new space.
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map. The other fields are left uninitialized.
  mov(FieldOperand(result, HeapObject::kMapOffset),
      Immediate(isolate()->factory()->sliced_ascii_string_map()));
}

// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag,
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
                               Register destination,
                               Register length,
                               Register scratch) {
  Label loop, done, short_string, short_loop;
  // Experimentation shows that the short string loop is faster if length < 10.
  cmp(length, Immediate(10));
  j(less_equal, &short_string);

  ASSERT(source.is(esi));
  ASSERT(destination.is(edi));
  ASSERT(length.is(ecx));

  // Because source is 4-byte aligned in our uses of this function,
  // we keep source aligned for the rep_movs call by copying the odd bytes
  // at the end of the ranges.
  mov(scratch, Operand(source, length, times_1, -4));
  mov(Operand(destination, length, times_1, -4), scratch);
  mov(scratch, ecx);
  shr(ecx, 2);
  rep_movs();
  and_(scratch, Immediate(0x3));
  add(destination, scratch);
  jmp(&done);

  bind(&short_string);
  test(length, length);
  j(zero, &done);

  bind(&short_loop);
  mov_b(scratch, Operand(source, 0));
  mov_b(Operand(destination, 0), scratch);
  inc(source);
  inc(destination);
  dec(length);
  j(not_zero, &short_loop);

  bind(&done);
}

void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  jmp(&entry);
  bind(&loop);
  mov(Operand(start_offset, 0), filler);
  add(start_offset, Immediate(kPointerSize));
  bind(&entry);
  cmp(start_offset, end_offset);
  j(less, &loop);
}

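// Test a single bit of a smi-encoded bit field stored at |field_offset| in
// |object|. The smi tag/shift is compensated for, so |bit_index| is the
// logical bit number within the untagged value; the result is left in the
// zero flag for a following j(zero, ...) or j(not_zero, ...).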
void MacroAssembler::BooleanBitTest(Register object,
                                    int field_offset,
                                    int bit_index) {
  bit_index += kSmiTagSize + kSmiShiftSize;
  ASSERT(IsPowerOf2(kBitsPerByte));
  int byte_index = bit_index / kBitsPerByte;
  int byte_bit_index = bit_index & (kBitsPerByte - 1);
  test_b(FieldOperand(object, field_offset + byte_index),
         static_cast<byte>(1 << byte_bit_index));
}

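// The two NegativeZeroTest variants below jump to |then_label| when |result|
// is zero but the sign of the operand(s) indicates the mathematical result
// should have been -0, which cannot be represented as a smi.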
void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  test(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op1,
                                      Register op2,
                                      Register scratch,
                                      Label* then_label) {
  Label ok;
  test(result, result);
  j(not_zero, &ok);
  mov(scratch, op1);
  or_(scratch, op2);
  j(sign, then_label);
  bind(&ok);
}

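// Try to fetch the prototype of |function| into |result|, jumping to |miss|
// for smis, non-functions, bound functions (when requested), and functions
// whose prototype slot still holds the hole.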
void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  // Check that the receiver isn't a smi.
  JumpIfSmi(function, miss);

  // Check that the function really is a function.
  CmpObjectType(function, JS_FUNCTION_TYPE, result);
  j(not_equal, miss);

  if (miss_on_bound_function) {
    // If a bound function, go to miss label.
    mov(scratch,
        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                   SharedFunctionInfo::kBoundFunction);
    j(not_zero, miss);
  }

  // Make sure that the function has an instance prototype.
  Label non_instance;
  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
  j(not_zero, &non_instance);

  // Get the prototype or initial map from the function.
  mov(result,
      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, scratch);
  j(not_equal, &done);

  // Get the prototype from the initial map.
  mov(result, FieldOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
  call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls_ ||
         stub->CompilingCallsToThisStubIsGCSafe(isolate()));
  jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
  mov(eax, Immediate(isolate()->factory()->undefined_value()));
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // The assert checks that the constants for the maximum number of digits
  // for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
  // the low kHashShift bits.
  and_(hash, String::kArrayIndexValueMask);
  STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
  if (String::kHashShift > kSmiTagSize) {
    shr(hash, String::kHashShift - kSmiTagSize);
  }
  if (!index.is(hash)) {
    mov(index, hash);
  }
}

void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ExternalReference(f, isolate())));
  CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
                                                   : kDontSaveFPRegs);
  CallStub(&ces);
}


void MacroAssembler::CallExternalReference(ExternalReference ref,
                                           int num_arguments) {
  mov(eax, Immediate(num_arguments));
  mov(ebx, Immediate(ref));

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(eax, Immediate(num_arguments));
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}

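// Operand addressing the |index|th outgoing argument of an API call, counted
// upward from esp; intended for use after PrepareCallApiFunction has
// reserved the argument slots.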
Operand ApiParameterOperand(int index) {
  return Operand(esp, index * kPointerSize);
}

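// Set up an API exit frame with room for |argc| outgoing arguments. In debug
// code esi is zapped so that any stale use of the context fails fast.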
void MacroAssembler::PrepareCallApiFunction(int argc) {
  EnterApiExitFrame(argc);
  if (emit_debug_code()) {
    mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
  }
}

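// Call an API function and handle the bookkeeping around it: bump the handle
// scope, route the call through the profiler thunk when profiling is active,
// restore the handle scope, promote any scheduled exception, then leave the
// exit frame and return, dropping |stack_space| pointer-sized stack slots.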
void MacroAssembler::CallApiFunctionAndReturn(
    Address function_address,
    Address thunk_address,
    Operand thunk_last_arg,
    int stack_space,
    Operand return_value_operand,
    Operand* context_restore_operand) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  ExternalReference limit_address =
      ExternalReference::handle_scope_limit_address(isolate());
  ExternalReference level_address =
      ExternalReference::handle_scope_level_address(isolate());

  // Allocate HandleScope in callee-save registers.
  mov(ebx, Operand::StaticVariable(next_address));
  mov(edi, Operand::StaticVariable(limit_address));
  add(Operand::StaticVariable(level_address), Immediate(1));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0),
        Immediate(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label profiler_disabled;
  Label end_profiler_check;
  bool* is_profiling_flag =
      isolate()->cpu_profiler()->is_profiling_address();
  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
  mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
  cmpb(Operand(eax, 0), 0);
  j(zero, &profiler_disabled);

  // Additional parameter is the address of the actual getter function.
  mov(thunk_last_arg, Immediate(function_address));
  // Call the api function.
  call(thunk_address, RelocInfo::RUNTIME_ENTRY);
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  // Call the api function.
  call(function_address, RelocInfo::RUNTIME_ENTRY);
  bind(&end_profiler_check);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0),
        Immediate(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label prologue;
  // Load the value from ReturnValue.
  mov(eax, return_value_operand);

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  bind(&prologue);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  mov(Operand::StaticVariable(next_address), ebx);
  sub(Operand::StaticVariable(level_address), Immediate(1));
  Assert(above_equal, kInvalidHandleScopeLevel);
  cmp(edi, Operand::StaticVariable(limit_address));
  j(not_equal, &delete_allocated_handles);
  bind(&leave_exit_frame);

  // Check if the function scheduled an exception.
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate());
  cmp(Operand::StaticVariable(scheduled_exception_address),
      Immediate(isolate()->factory()->the_hole_value()));
  j(not_equal, &promote_scheduled_exception);
  bind(&exception_handled);

#if ENABLE_EXTRA_CHECKS
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = eax;
  Register map = ecx;

  JumpIfSmi(return_value, &ok, Label::kNear);
  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));

  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  j(below, &ok, Label::kNear);

  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  j(above_equal, &ok, Label::kNear);

  cmp(map, isolate()->factory()->heap_number_map());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->undefined_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->true_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->false_value());
  j(equal, &ok, Label::kNear);

  cmp(return_value, isolate()->factory()->null_value());
  j(equal, &ok, Label::kNear);

  Abort(kAPICallReturnedInvalidObject);

  bind(&ok);
#endif

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    mov(esi, *context_restore_operand);
  }
  LeaveApiExitFrame(!restore_context);
  ret(stack_space * kPointerSize);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallRuntime(Runtime::kPromoteScheduledException, 0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  ExternalReference delete_extensions =
      ExternalReference::delete_handle_scope_extensions(isolate());
  bind(&delete_allocated_handles);
  mov(Operand::StaticVariable(limit_address), edi);
  mov(edi, eax);
  mov(Operand(esp, 0),
      Immediate(ExternalReference::isolate_address(isolate())));
  mov(eax, Immediate(delete_extensions));
  call(eax);
  mov(eax, edi);
  jmp(&leave_exit_frame);
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
  // Set the entry point and jump to the C entry runtime stub.
  mov(ebx, Immediate(ext));
  CEntryStub ces(1);
  jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}

void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be ecx to
  // follow the calling convention which requires the call type to be
  // in ecx.
  ASSERT(dst.is(ecx));
  if (call_kind == CALL_AS_FUNCTION) {
    // Set to some non-zero smi by updating the least significant
    // byte.
    mov_b(dst, 1 << kSmiTagSize);
  } else {
    // Set to smi zero by clearing the register.
    xor_(dst, dst);
  }
}

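// Shared prologue for the Invoke* helpers: compare the expected and actual
// argument counts and, on a mismatch, go through the arguments adaptor
// trampoline. When *definitely_mismatches is set on return, the adaptor has
// already completed the invocation and no direct call should be emitted.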
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    const Operand& code_operand,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    Label::Distance done_near,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(eax, actual.immediate());
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(ebx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmp(expected.reg(), actual.immediate());
      j(equal, &invoke);
      ASSERT(expected.reg().is(ebx));
      mov(eax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmp(expected.reg(), actual.reg());
      j(equal, &invoke);
      ASSERT(actual.reg().is(eax));
      ASSERT(expected.reg().is(ebx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      mov(edx, Immediate(code_constant));
      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_operand.is_reg(edx)) {
      mov(edx, code_operand);
    }

    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      SetCallKind(ecx, call_kind);
      call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        jmp(done, done_near);
      }
    } else {
      SetCallKind(ecx, call_kind);
      jmp(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}

void MacroAssembler::InvokeCode(const Operand& code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag, Label::kNear,
                 call_wrapper, call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      SetCallKind(ecx, call_kind);
      call(code);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      SetCallKind(ecx, call_kind);
      jmp(code);
    }
    bind(&done);
  }
}

void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  Operand dummy(eax, 0);
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
                 flag, Label::kNear, call_wrapper, call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code, rmode));
      SetCallKind(ecx, call_kind);
      call(code, rmode);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      SetCallKind(ecx, call_kind);
      jmp(code, rmode);
    }
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  ASSERT(fun.is(edi));
  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(ebx);

  ParameterCount expected(ebx);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper, call_kind);
}

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Get the function and set up the context.
  LoadHeapObject(edi, function);
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, actual, flag, call_wrapper, call_kind);
}

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinFunction(edi, id);
  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
             expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the JavaScript builtin function from the builtins object.
  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
  mov(target, FieldOperand(target,
                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(edi));
  // Load the JavaScript builtin function from the builtins object.
  GetBuiltinFunction(edi, id);
  // Load the code entry point from the function into the target register.
  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}

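// Load into |dst| the context |context_chain_length| levels up from the
// current context in esi (0 means the current function context itself).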
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in esi).
    mov(dst, esi);
  }

  // We should not have found a with context by walking the context chain
  // (i.e., the static scope chain and runtime context chain do not agree).
  // A variable occurring in such a scope should have slot type LOOKUP and
  // not CONTEXT.
  if (emit_debug_code()) {
    cmp(FieldOperand(dst, HeapObject::kMapOffset),
        isolate()->factory()->with_context_map());
    Check(not_equal, kVariableResolvedToWithContext);
  }
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  mov(scratch, Operand(scratch,
                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));

  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmp(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  mov(map_in_out, FieldOperand(scratch, offset));
}

void MacroAssembler::LoadInitialArrayMap(
    Register function_in, Register scratch,
    Register map_out, bool can_have_holes) {
  ASSERT(!function_in.is(map_out));
  Label done;
  mov(map_out, FieldOperand(function_in,
                            JSFunction::kPrototypeOrInitialMapOffset));
  if (!FLAG_smi_only_arrays) {
    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        kind,
                                        map_out,
                                        scratch,
                                        &done);
  } else if (can_have_holes) {
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        FAST_HOLEY_SMI_ELEMENTS,
                                        map_out,
                                        scratch,
                                        &done);
  }
  bind(&done);
}

void MacroAssembler::LoadGlobalContext(Register global_context) {
  // Load the global or builtins object from the current context.
  mov(global_context,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(global_context,
      FieldOperand(global_context, GlobalObject::kNativeContextOffset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  mov(function,
      Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  mov(function,
      FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  mov(function, Operand(function, Context::SlotOffset(index)));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map. The global functions all have initial maps.
  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

// Store the value in register src in the safepoint register stack
// slot for register dst.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
  mov(SafepointRegisterSlot(dst), src);
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  mov(dst, SafepointRegisterSlot(src));
}


Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the lowest encoding,
  // which means that lowest encodings are furthest away from
  // the stack pointer.
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return kNumSafepointRegisters - reg_code - 1;
}

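// The three helpers below embed a heap object reference in generated code.
// Objects in new space cannot be referenced directly from code, so they are
// wrapped in a cell and accessed indirectly; old-space objects are embedded
// as immediates.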
void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference embedding_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    mov(result, Operand::ForCell(cell));
  } else {
    mov(result, object);
  }
}


void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    cmp(reg, Operand::ForCell(cell));
  } else {
    cmp(reg, object);
  }
}


void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    push(Operand::ForCell(cell));
  } else {
    Push(object);
  }
}

void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
  if (is_uint16(bytes_dropped)) {
    ret(bytes_dropped);
  } else {
    pop(scratch);
    add(esp, Immediate(bytes_dropped));
    push(scratch);
    ret(0);
  }
}

void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
  // Make sure the floating point stack is either empty or has depth items.
  ASSERT(depth <= 7);
  // This is very expensive.
  ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);

  // The top-of-stack (tos) is 7 if there is one item pushed.
  int tos = (8 - depth) % 8;
  const int kTopMask = 0x3800;
  push(eax);
  fwait();
  fnstsw_ax();
  and_(eax, kTopMask);
  shr(eax, 11);
  cmp(eax, Immediate(tos));
  Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
  fnclex();
  pop(eax);
}

void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    add(esp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    mov(dst, src);
  }
}

void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}


void MacroAssembler::IncrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    IncrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}


void MacroAssembler::DecrementCounter(Condition cc,
                                      StatsCounter* counter,
                                      int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Label skip;
    j(NegateCondition(cc), &skip);
    pushfd();
    DecrementCounter(counter, value);
    popfd();
    bind(&skip);
  }
}

void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Factory* factory = isolate()->factory();
    Label ok;
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_double_array_map()));
    j(equal, &ok);
    cmp(FieldOperand(elements, HeapObject::kMapOffset),
        Immediate(factory->fixed_cow_array_map()));
    j(equal, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
  }
}

void MacroAssembler::Check(Condition cc, BailoutReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}

void MacroAssembler::Abort(BailoutReason reason) {
  // We want to pass the msg string like a smi to avoid GC
  // problems; however, msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  const char* msg = GetBailoutReason(reason);
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }

  if (FLAG_trap_on_abort) {
    int3();
    return;
  }
#endif

  push(eax);
  push(Immediate(p0));
  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // will not return here
  int3();
}

void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}

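// NumberOfOwnDescriptors reads Map::kBitField3Offset and decodes the
// NumberOfOwnDescriptorsBits field; DecodeField presumably masks and shifts
// by the bit field's position, leaving the raw count in |dst|.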
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


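// Worked example, assuming the IEEE-754 double layout behind HeapNumber:
// for power == 3 the biased exponent is 3 + 1023 == 1026; shifted left by
// kMantissaBits (52) this yields the bit pattern of 8.0 == 2^3 with a zero
// mantissa.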
void MacroAssembler::LoadPowerOf2(XMMRegister dst,
                                  Register scratch,
                                  int power) {
  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
                  HeapNumber::kExponentBits));
  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
  movd(dst, scratch);
  psllq(dst, HeapNumber::kMantissaBits);
}


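// Looks up a smi or heap number |object| in the number-to-string cache and
// jumps to |not_found| on a miss; on a hit the cached string is left in
// |result|.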
void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_found) {
  // Register usage: |result| is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  sub(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  Label not_smi;
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfNotSmi(object, &not_smi, Label::kNear);
  mov(scratch, object);
  SmiUntag(scratch);
  jmp(&smi_hash_calculated, Label::kNear);
  bind(&not_smi);
  cmp(FieldOperand(object, HeapObject::kMapOffset),
      isolate()->factory()->heap_number_map());
  j(not_equal, not_found);
  STATIC_ASSERT(8 == kDoubleSize);
  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
  // Object is heap number and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  Register index = scratch;
  Register probe = mask;
  mov(probe,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope fscope(this, SSE2);
    movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
    ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
  } else {
    fld_d(FieldOperand(object, HeapNumber::kValueOffset));
    fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
    FCmp();
  }
  j(parity_even, not_found);  // Bail out if NaN is involved.
  j(not_equal, not_found);  // The cache did not contain this value.
  jmp(&load_result_from_cache, Label::kNear);

  bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  and_(scratch, mask);
  // Check if the entry is the smi we are looking for.
  cmp(object,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize));
  j(not_equal, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  mov(result,
      FieldOperand(number_string_cache,
                   index,
                   times_twice_pointer_size,
                   FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}


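// The mask below keeps only the string-ness, representation and encoding
// bits, so the single cmp afterwards matches exactly the sequential
// one-byte (ASCII) string instance types.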
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
    Register instance_type,
    Register scratch,
    Label* failure) {
  if (!scratch.is(instance_type)) {
    mov(scratch, instance_type);
  }
  and_(scratch,
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
  j(not_equal, failure);
}


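// The lea below computes scratch1 + scratch2 * 8. Because the masked bits
// do not collide with themselves shifted left by 3 (see the ASSERT_EQ), the
// addition behaves like a bitwise OR, packing both instance types into one
// register for a single comparison.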
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
                                                         Register object2,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that both objects are not smis.
  STATIC_ASSERT(kSmiTag == 0);
  mov(scratch1, object1);
  and_(scratch1, object2);
  JumpIfSmi(scratch1, failure);

  // Load instance type for both strings.
  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat ASCII strings.
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  // Interleave bits from both instance types and compare them in one check.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  and_(scratch1, kFlatAsciiStringMask);
  and_(scratch2, kFlatAsciiStringMask);
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
  j(not_equal, failure);
}


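// A unique name is an internalized string or a Symbol: the test falls
// through for internalized strings, and anything else is accepted only if
// its instance type is SYMBOL_TYPE.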
void MacroAssembler::JumpIfNotUniqueName(Operand operand,
                                         Label* not_unique_name,
                                         Label::Distance distance) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
  j(zero, &succeed);
  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
  j(not_equal, not_unique_name, distance);

  bind(&succeed);
}


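// Illustrative layout, assuming 16-byte alignment and num_arguments == 3:
// esp drops by four words and is rounded down to a 16-byte boundary, and
// the original esp is stored in the slot at esp + 3 * kPointerSize, where
// CallCFunction reloads it after the call.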
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Make stack end at alignment and make room for num_arguments words
    // and the original value of esp.
    mov(scratch, esp);
    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kPointerSize), scratch);
  } else {
    sub(esp, Immediate(num_arguments * kPointerSize));
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  mov(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  ASSERT(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  if (OS::ActivationFrameAlignment() != 0) {
    mov(esp, Operand(esp, num_arguments * kPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kPointerSize));
  }
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


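// Hypothetical usage sketch (assumes CodePatcher exposes the embedded
// assembler via a masm() accessor):
//   CodePatcher patcher(address, 1);
//   patcher.masm()->int3();  // One byte; the destructor flushes the icache.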
CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


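// Pages are power-of-two aligned, so masking any interior pointer with
// ~Page::kPageAlignmentMask yields the page's MemoryChunk header, whose
// flags word is then tested against |mask|.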
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
           static_cast<uint8_t>(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckPageFlagForMap(
    Handle<Map> map,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
  Page* page = Page::FromAddress(map->address());
  ExternalReference reference(ExternalReference::page_flags(page));
  // The inlined static address check of the page's flags relies
  // on maps never being compacted.
  ASSERT(!isolate()->heap()->mark_compact_collector()->
         IsOnEvacuationCandidate(*map));
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
  } else {
    test(Operand::StaticVariable(reference), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    mov(scratch, map);
    mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
    j(not_zero, if_deprecated);
  }
}


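// Mark bits come in pairs per object; the ASSERTs in EnsureNotWhite below
// pin the patterns (white 00, black 10, grey 11), so black is the
// combination first_bit == 1, second_bit == 0 passed to HasColor here.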
void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black,
                                 Label::Distance on_black_near) {
  HasColor(object, scratch0, scratch1,
           on_black, on_black_near,
           1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              Label::Distance has_color_distance,
                              int first_bit,
                              int second_bit) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
  j(zero, &word_boundary, Label::kNear);
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  jmp(&other_color, Label::kNear);

  bind(&word_boundary);
  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
  bind(&other_color);
}


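// GetMarkBits maps an object address to a (bitmap cell, bit mask) pair:
// bitmap_reg ends up holding the page start plus the offset of the marking
// bitmap cell for the address, and mask_reg holds 1 << (bit index within
// that cell); ecx is clobbered for the variable shift, hence the alias
// ASSERT below.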
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  and_(bitmap_reg, addr_reg);
  mov(ecx, addr_reg);
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shr(ecx, shift);
  and_(ecx,
       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));

  add(bitmap_reg, ecx);
  mov(ecx, addr_reg);
  shr(ecx, kPointerSizeLog2);
  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
  mov(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}


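// EnsureNotWhite jumps to |value_is_white_and_not_data| when |value| is
// white and may contain GC pointers; white data objects (heap numbers and
// non-indirect strings) are instead marked black in place and their size is
// added to the page's live-bytes counter.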
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl. May overflow making the check conservative.
    add(mask_scratch, mask_scratch);
    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = ecx;  // Holds map while checking type.
  Register length = ecx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number
  mov(map, FieldOperand(value, HeapObject::kMapOffset));
  cmp(map, isolate()->factory()->heap_number_map());
  j(not_equal, &not_heap_number, Label::kNear);
  mov(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = ecx;
  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  test_b(instance_type, kExternalStringTag);
  j(zero, &not_external, Label::kNear);
  mov(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  add(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
  // by 2. If we multiply the string length as smi by this, it still
  // won't overflow a 32-bit value.
  ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
  ASSERT(SeqOneByteString::kMaxSize <=
         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
      length);
  if (emit_debug_code()) {
    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
    Check(less_equal, kLiveBytesCountOverflowChunkSize);
  }

  bind(&done);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  mov(dst, FieldOperand(map, Map::kBitField3Offset));
  and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
}


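// Walks the prototype chain starting from the object in eax: the receiver
// must have a valid enum cache, every other object on the chain must have
// an enum length of zero, and no object may have elements; any violation
// bails out to |call_runtime|.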
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
  Label next, start;
  mov(ecx, eax);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);
  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(edx, ebx);
  cmp(edx, Immediate(Smi::FromInt(0)));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register ecx contains the current JS
  // object we've reached through the prototype chain.
  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
  cmp(ecx, isolate()->factory()->empty_fixed_array());
  j(not_equal, call_runtime);

  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  cmp(ecx, isolate()->factory()->null_value());
  j(not_equal, &next);
}


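// Note that this function ends with a cmp and only sets the flags: callers
// are expected to branch on equal / not_equal themselves, after the early
// jumps to |no_memento_found| for addresses outside the new space.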
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Immediate(new_space_start));
  j(less, no_memento_found);
  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
  j(greater, no_memento_found);
  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
      Immediate(isolate()->factory()->allocation_memento_map()));
}


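// Scans the prototype chain of |object| and jumps to |found| as soon as a
// map with DICTIONARY_ELEMENTS is seen; the walk terminates when the chain
// reaches null.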
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  ASSERT(!scratch1.is(scratch0));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Start the chain walk at the object itself.
  mov(current, object);

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  mov(current, FieldOperand(current, HeapObject::kMapOffset));
  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
  and_(scratch1, Map::kElementsKindMask);
  shr(scratch1, Map::kElementsKindShift);
  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
  j(equal, found);
  mov(current, FieldOperand(current, Map::kPrototypeOffset));
  cmp(current, Immediate(factory->null_value()));
  j(not_equal, &loop_again);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32