// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"
|
|
|
|
|
|
|
|
#include "accessors.h"
|
|
|
|
#include "api.h"
|
|
|
|
#include "bootstrapper.h"
|
2011-04-07 14:42:37 +00:00
|
|
|
#include "codegen.h"
|
2008-09-11 10:51:52 +00:00
|
|
|
#include "compilation-cache.h"
|
2013-07-03 15:39:18 +00:00
|
|
|
#include "cpu-profiler.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "debug.h"
|
2011-06-29 13:02:00 +00:00
|
|
|
#include "deoptimizer.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "global-handles.h"
|
2011-05-03 08:23:58 +00:00
|
|
|
#include "heap-profiler.h"
|
2011-09-19 18:36:47 +00:00
|
|
|
#include "incremental-marking.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "mark-compact.h"
|
|
|
|
#include "natives.h"
|
2010-08-11 14:30:14 +00:00
|
|
|
#include "objects-visiting.h"
|
2011-09-19 18:36:47 +00:00
|
|
|
#include "objects-visiting-inl.h"
|
2012-04-05 14:10:39 +00:00
|
|
|
#include "once.h"
|
2010-12-07 11:31:57 +00:00
|
|
|
#include "runtime-profiler.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "scopeinfo.h"
|
2009-10-21 15:03:34 +00:00
|
|
|
#include "snapshot.h"
|
2011-09-19 18:36:47 +00:00
|
|
|
#include "store-buffer.h"
|
2008-07-03 15:10:15 +00:00
|
|
|
#include "v8threads.h"
|
2012-09-12 11:15:20 +00:00
|
|
|
#include "v8utils.h"
|
2010-12-07 11:31:57 +00:00
|
|
|
#include "vm-state-inl.h"
|
2010-04-19 19:30:11 +00:00
|
|
|
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
|
2009-08-31 12:40:37 +00:00
|
|
|
#include "regexp-macro-assembler.h"
|
2009-10-26 12:26:42 +00:00
|
|
|
#include "arm/regexp-macro-assembler-arm.h"
|
2009-08-31 12:40:37 +00:00
|
|
|
#endif
|
2011-03-28 13:05:36 +00:00
|
|
|
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
|
|
|
|
#include "regexp-macro-assembler.h"
|
|
|
|
#include "mips/regexp-macro-assembler-mips.h"
|
|
|
|
#endif
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-05-25 10:05:56 +00:00
|
|
|
namespace v8 {
namespace internal {


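// The constructor below only fills in default limits and NULL space
// pointers; the spaces themselves are created later (HasBeenSetUp() further
// down checks that they exist).  LUMP_OF_MEMORY is the sizing unit used in
// the initializer list: 2 MB on x64 and 1 MB elsewhere, so the semispace and
// old-generation limits scale with the target architecture.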
Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if V8_TARGET_ARCH_X64
#define LUMP_OF_MEMORY (2 * MB)
      code_range_size_(512*MB),
#else
#define LUMP_OF_MEMORY MB
      code_range_size_(0),
#endif
#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
#else
      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
      max_executable_size_(256l * LUMP_OF_MEMORY),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      global_ic_age_(0),
      flush_monomorphic_ics_(false),
      scan_on_scavenge_pages_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      property_cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
#endif  // DEBUG
      new_space_high_promotion_mode_active_(false),
      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_string_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      low_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0.0),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      marking_time_(0.0),
      sweeping_time_(0.0),
      store_buffer_(this),
      marking_(this),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
      no_weak_embedded_maps_verification_scope_depth_(0),
#endif
      promotion_queue_(this),
      configured_(false),
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  native_contexts_list_ = NULL;
  array_buffers_list_ = Smi::FromInt(0);
  allocation_sites_list_ = Smi::FromInt(0);
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);

  ClearObjectStats(true);
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity() +
      property_cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      property_cell_space_->CommittedMemory() +
      lo_space_->Size();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
      old_pointer_space_->CommittedPhysicalMemory() +
      old_data_space_->CommittedPhysicalMemory() +
      code_space_->CommittedPhysicalMemory() +
      map_space_->CommittedPhysicalMemory() +
      cell_space_->CommittedPhysicalMemory() +
      property_cell_space_->CommittedPhysicalMemory() +
      lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available() +
      property_cell_space_->Available();
}


bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         property_cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}


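// Decides whether the next collection should be a scavenge (young generation
// only) or a full mark-compact, and reports the decisive reason through
// *reason.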
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set.  The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB\n",
           isolate_->memory_allocator()->Size() / KB,
           isolate_->memory_allocator()->Available() / KB);
  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           new_space_.Size() / KB,
           new_space_.Available() / KB,
           new_space_.CommittedMemory() / KB);
  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_pointer_space_->SizeOfObjects() / KB,
           old_pointer_space_->Available() / KB,
           old_pointer_space_->CommittedMemory() / KB);
  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_data_space_->SizeOfObjects() / KB,
           old_data_space_->Available() / KB,
           old_data_space_->CommittedMemory() / KB);
  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           code_space_->SizeOfObjects() / KB,
           code_space_->Available() / KB,
           code_space_->CommittedMemory() / KB);
  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           map_space_->SizeOfObjects() / KB,
           map_space_->Available() / KB,
           map_space_->CommittedMemory() / KB);
  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           cell_space_->SizeOfObjects() / KB,
           cell_space_->Available() / KB,
           cell_space_->CommittedMemory() / KB);
  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           property_cell_space_->SizeOfObjects() / KB,
           property_cell_space_->Available() / KB,
           property_cell_space_->CommittedMemory() / KB);
  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           lo_space_->SizeOfObjects() / KB,
           lo_space_->Available() / KB,
           lo_space_->CommittedMemory() / KB);
  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
           ", available: %6" V8_PTR_PREFIX "d KB"
           ", committed: %6" V8_PTR_PREFIX "d KB\n",
           this->SizeOfObjects() / KB,
           this->Available() / KB,
           this->CommittedMemory() / KB);
  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
           amount_of_external_allocated_memory_ / KB);
  PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


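// Work done at the start of every collection, before the collector itself
// runs: cache clearing, bumping the GC count and, in verify/debug builds,
// checking the heap.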
void Heap::GarbageCollectionPrologue() {
  { AllowHeapAllocation for_the_first_part_of_prologue;
    isolate_->transcendental_cache()->Clear();
    ClearJSFunctionResultCaches();
    gc_count_++;
    unflattened_strings_length_ = 0;

    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
      mark_compact_collector()->EnableCodeFlushing(true);
    }

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

#ifdef DEBUG
  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::RepairFreeListsAfterBoot() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterBoot();
  }
}


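// Work done after every collection: optional from-space zapping and
// verification, counter and histogram updates for each space, and deferred
// error-object stack trace formatting.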
void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();

  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_map_space()->AddSample(
        static_cast<int>(
            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
        static_cast<int>(
            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_property_cell_space()->
        AddSample(static_cast<int>(
            (property_cell_space()->CommittedMemory() * 100.0) /
            CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
        static_cast<int>(cell_space()->CommittedMemory() / KB));
    isolate_->counters()->
        heap_sample_property_cell_space_committed()->
            AddSample(static_cast<int>(
                property_cell_space()->CommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space) \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available())); \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory())); \
  isolate_->counters()->space##_bytes_used()->Set( \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
  if (space()->CommittedMemory() > 0) { \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 - \
            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space) \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#if defined(DEBUG)
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT

  error_object_list_.DeferredFormatStackTrace(isolate());
}


void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC.  Therefore if we collect aggressively and a weak handle
  // callback has been invoked, we rerun major GC to release objects which
  // become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  new_space_.Shrink();
  UncommitFromSpace();
  incremental_marking()->UncommitMarkingDeque();
}


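// Performs a single collection in the given space with the given collector.
// Returns whether the next GC is likely to collect more garbage, as reported
// by PerformGarbageCollection().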
bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking() &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete()) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    ASSERT(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    {
      HistogramTimerScope histogram_timer_scope(
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                   : isolate_->counters()->gc_compactor());
      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, &tracer);
    }

    GarbageCollectionEpilogue();
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!mark_compact_collector()->abort_incremental_marking() &&
      incremental_marking()->IsStopped() &&
      incremental_marking()->WorthActivating() &&
      NextGCIsLikelyToBeFull()) {
    incremental_marking()->Start();
  }

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this, NULL, NULL);
  if (incremental_marking()->IsStopped()) {
    PerformGarbageCollection(SCAVENGER, &tracer);
  } else {
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
  }
}


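// Moves a range of elements within a FixedArray and keeps the store buffer
// and incremental marking informed about the pointers that were rewritten.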
void Heap::MoveElements(FixedArray* array,
                        int dst_index,
                        int src_index,
                        int len) {
  if (len == 0) return;

  ASSERT(array->map() != HEAP->fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  OS::MemMove(dst_objects,
              array->data_start() + src_index,
              len * kPointerSize);
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}


#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable() {
  StringTableVerifier verifier;
  HEAP->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP


static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}


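// Reserves one chunk per space, retrying with garbage collections until every
// request can be satisfied; each reserved chunk is marked with a free-list
// node so that a GC before deserialization does not trip over it.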
void Heap::ReserveSpace(
    int *sizes,
    Address *locations_out) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
      if (sizes[space] != 0) {
        MaybeObject* allocation;
        if (space == NEW_SPACE) {
          allocation = new_space()->AllocateRaw(sizes[space]);
        } else {
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
        }
        FreeListNode* node;
        if (!allocation->To<FreeListNode>(&node)) {
          if (space == NEW_SPACE) {
            Heap::CollectGarbage(NEW_SPACE,
                                 "failed to reserve space in the new space");
          } else {
            AbortIncrementalMarkingAndCollectGarbage(
                this,
                static_cast<AllocationSpace>(space),
                "failed to reserve space in paged space");
          }
          gc_performed = true;
          break;
        } else {
          // Mark with a free list node, in case we have a GC before
          // deserializing.
          node->set_size(this, sizes[space]);
          locations_out[space] = node->address();
        }
      }
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


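// Recomputes the young-generation survival rate after a collection and
// classifies the trend as INCREASING, DECREASING or STABLE; the trend is used
// in PerformGarbageCollection() to toggle the high promotion mode.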
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}


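// The collector-independent part of a collection: GC callbacks, the actual
// scavenge or mark-compact, survival-rate bookkeeping, promotion-mode
// switching and post-GC weak handle processing.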
bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable();
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;

    UpdateSurvivalRateTrend(start_new_space_size);

    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

    old_generation_allocation_limit_ =
        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  if (!new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() &&
      IsHighSurvivalRate()) {
    // Stable high survival rates even though the young generation is at
    // maximum capacity indicate that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit the maximal capacity of the young generation.
    SetNewSpaceHighPromotionModeActive(true);
    if (FLAG_trace_gc) {
      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
               new_space_.InitialCapacity() / MB);
    }
    // Support for global pre-tenuring uses the high promotion mode as a
    // heuristic indicator of whether to pretenure or not; we trigger
    // deoptimization here to take advantage of pre-tenuring as soon as
    // possible.
    if (FLAG_pretenuring) {
      isolate_->stack_guard()->FullDeopt();
    }
  } else if (new_space_high_promotion_mode_active_ &&
             IsStableOrDecreasingSurvivalTrend() &&
             IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow again.
    SetNewSpaceHighPromotionModeActive(false);
    if (FLAG_trace_gc) {
      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
               new_space_.MaximumCapacity() / MB);
    }
    // Trigger deoptimization here to turn off pre-tenuring as soon as
    // possible.
    if (FLAG_pretenuring) {
      isolate_->stack_guard()->FullDeopt();
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() > new_space_.InitialCapacity()) {
    new_space_.Shrink();
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  // Callbacks that fire after this point might trigger nested GCs and
  // restart incremental marking, so the assertion can't be moved down.
  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());

  gc_post_processing_depth_++;
  { AllowHeapAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, tracer);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  {
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    VMState<EXTERNAL> state(isolate_);
    CallGCEpilogueCallbacks(gc_type);
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable();
  }
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
    global_gc_prologue_callback_();
  }
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, flags);
    }
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }
  if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
    global_gc_epilogue_callback_();
  }
}


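// Runs a full mark-compact collection under the MARK_COMPACT gc_state and
// resets the per-full-GC bookkeeping afterwards.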
void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;

  flush_monomorphic_ics_ = false;
}


void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef VERIFY_HEAP
|
2008-09-05 12:34:09 +00:00
|
|
|
// Visitor class to verify pointers in code or data space do not point into
|
2008-07-03 15:10:15 +00:00
|
|
|
// new space.
|
2008-09-05 12:34:09 +00:00
|
|
|
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
|
2008-07-03 15:10:15 +00:00
|
|
|
public:
|
|
|
|
void VisitPointers(Object** start, Object** end) {
|
|
|
|
for (Object** current = start; current < end; current++) {
|
|
|
|
if ((*current)->IsHeapObject()) {
|
2012-10-12 11:41:14 +00:00
|
|
|
CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
2009-05-07 10:43:33 +00:00
|
|
|
|
2009-05-14 08:55:34 +00:00
|
|
|
|
|
|
|
static void VerifyNonPointerSpacePointers() {
|
|
|
|
// Verify that there are no pointers to new space in spaces where we
|
|
|
|
// do not expect them.
|
|
|
|
VerifyNonPointerSpacePointersVisitor v;
|
2011-03-18 20:35:07 +00:00
|
|
|
HeapObjectIterator code_it(HEAP->code_space());
|
2011-09-19 18:36:47 +00:00
|
|
|
for (HeapObject* object = code_it.Next();
|
|
|
|
object != NULL; object = code_it.Next())
|
2009-09-21 10:35:47 +00:00
|
|
|
object->Iterate(&v);
|
2009-05-14 08:55:34 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// The old data space is normally swept conservatively, which means the
|
|
|
|
// iterator does not work there, so this check is normally skipped.
|
|
|
|
if (!HEAP->old_data_space()->was_swept_conservatively()) {
|
|
|
|
HeapObjectIterator data_it(HEAP->old_data_space());
|
|
|
|
for (HeapObject* object = data_it.Next();
|
|
|
|
object != NULL; object = data_it.Next())
|
|
|
|
object->Iterate(&v);
|
|
|
|
}
|
2009-05-14 08:55:34 +00:00
|
|
|
}
|
2012-10-12 11:41:14 +00:00
|
|
|
#endif // VERIFY_HEAP
|
2009-05-14 08:55:34 +00:00
|
|
|
|
2009-07-09 13:28:22 +00:00
|
|
|
|
2010-04-14 07:26:20 +00:00
|
|
|
void Heap::CheckNewSpaceExpansionCriteria() {
|
|
|
|
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
|
2011-11-25 14:41:38 +00:00
|
|
|
survived_since_last_expansion_ > new_space_.Capacity() &&
|
|
|
|
!new_space_high_promotion_mode_active_) {
|
|
|
|
// Grow the size of new space if there is room to grow, enough data
|
|
|
|
// has survived scavenge since the last expansion and we are not in
|
|
|
|
// high promotion mode.
|
2010-04-14 07:26:20 +00:00
|
|
|
new_space_.Grow();
|
|
|
|
survived_since_last_expansion_ = 0;
|
|
|
|
}
|
|
|
|
}
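// Editor's note (illustrative addition, not part of the original source): a
// worked example of the criterion above, using made-up numbers. With a
// semispace capacity of 1 MB, a maximum capacity of 8 MB, and more than 1 MB
// of data having survived scavenges since the last expansion, new space is
// grown and the survival counter is reset:
//
//   Capacity() == 1 MB, MaximumCapacity() == 8 MB,
//   survived_since_last_expansion_ == 1.5 MB, high promotion mode inactive
//     => new_space_.Grow(); survived_since_last_expansion_ = 0;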
|
|
|
|
|
|
|
|
|
2011-05-17 12:18:19 +00:00
|
|
|
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
|
|
|
|
return heap->InNewSpace(*p) &&
|
|
|
|
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void Heap::ScavengeStoreBufferCallback(
|
|
|
|
Heap* heap,
|
|
|
|
MemoryChunk* page,
|
|
|
|
StoreBufferEvent event) {
|
|
|
|
heap->store_buffer_rebuilder_.Callback(page, event);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
|
|
|
|
if (event == kStoreBufferStartScanningPagesEvent) {
|
|
|
|
start_of_current_page_ = NULL;
|
|
|
|
current_page_ = NULL;
|
|
|
|
} else if (event == kStoreBufferScanningPageEvent) {
|
|
|
|
if (current_page_ != NULL) {
|
|
|
|
// If this page already overflowed the store buffer during this iteration.
|
|
|
|
if (current_page_->scan_on_scavenge()) {
|
|
|
|
// Then we should wipe out the entries that have been added for it.
|
|
|
|
store_buffer_->SetTop(start_of_current_page_);
|
|
|
|
} else if (store_buffer_->Top() - start_of_current_page_ >=
|
|
|
|
(store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
|
|
|
|
// Did we find too many pointers in the previous page? The heuristic is
|
|
|
|
// that no page can take more than 1/5 of the remaining slots in the store
|
|
|
|
// buffer.
|
|
|
|
current_page_->set_scan_on_scavenge(true);
|
|
|
|
store_buffer_->SetTop(start_of_current_page_);
|
|
|
|
} else {
|
|
|
|
// In this case the page we scanned took a reasonable number of slots in
|
|
|
|
// the store buffer. It has now been rehabilitated and is no longer
|
|
|
|
// marked scan_on_scavenge.
|
|
|
|
ASSERT(!current_page_->scan_on_scavenge());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
start_of_current_page_ = store_buffer_->Top();
|
|
|
|
current_page_ = page;
|
|
|
|
} else if (event == kStoreBufferFullEvent) {
|
|
|
|
// The current page overflowed the store buffer again. Wipe out its entries
|
|
|
|
// in the store buffer and mark it scan-on-scavenge again. This may happen
|
|
|
|
// several times while scanning.
|
|
|
|
if (current_page_ == NULL) {
|
|
|
|
// Store Buffer overflowed while scanning promoted objects. These are not
|
|
|
|
// in any particular page, though they are likely to be clustered by the
|
|
|
|
// allocation routines.
|
2013-06-12 13:14:35 +00:00
|
|
|
store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
|
2011-09-19 18:36:47 +00:00
|
|
|
} else {
|
|
|
|
// Store Buffer overflowed while scanning a particular old space page for
|
|
|
|
// pointers to new space.
|
|
|
|
ASSERT(current_page_ == page);
|
|
|
|
ASSERT(page != NULL);
|
|
|
|
current_page_->set_scan_on_scavenge(true);
|
|
|
|
ASSERT(start_of_current_page_ != store_buffer_->Top());
|
|
|
|
store_buffer_->SetTop(start_of_current_page_);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
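// Editor's note (illustrative addition, not part of the original source): the
// "1/5" in the comment above and the ">> 2" in the code agree. With
// used = Top() - start_of_current_page_ and rest = Limit() - Top() at the
// moment the next page starts, the page is marked scan-on-scavenge when
//
//   used >= rest / 4
//     <=>  4 * used >= rest
//     <=>  5 * used >= used + rest
//
// i.e. when the page consumed at least one fifth of the store buffer slots
// that were still free when its scan began.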
|
|
|
|
|
|
|
|
|
2011-11-09 13:48:43 +00:00
|
|
|
void PromotionQueue::Initialize() {
|
|
|
|
// Assumes that a NewSpacePage exactly fits a whole number of promotion queue
|
|
|
|
// entries (where each is a pair of intptr_t). This allows us to simplify
|
|
|
|
// the test for when to switch pages.
|
|
|
|
ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
|
|
|
|
== 0);
|
|
|
|
limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
|
|
|
|
front_ = rear_ =
|
|
|
|
reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
|
|
|
|
emergency_stack_ = NULL;
|
|
|
|
guard_ = false;
|
|
|
|
}
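// Editor's note (descriptive addition, not part of the original source):
// together with RelocateQueueHead() below, this shows the queue layout --
// entries are two words each (a size followed by an object pointer in
// ascending address order), written downwards from ToSpaceEnd() towards
// limit_ at ToSpaceStart(). The ASSERT above guarantees that a NewSpacePage
// body holds a whole number of such entries, so the page-switch test never
// has to handle an entry straddling two pages.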
|
|
|
|
|
|
|
|
|
|
|
|
void PromotionQueue::RelocateQueueHead() {
|
|
|
|
ASSERT(emergency_stack_ == NULL);
|
|
|
|
|
|
|
|
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
|
|
|
|
intptr_t* head_start = rear_;
|
|
|
|
intptr_t* head_end =
|
2012-02-23 12:11:24 +00:00
|
|
|
Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
|
2011-11-09 13:48:43 +00:00
|
|
|
|
2011-11-09 15:40:08 +00:00
|
|
|
int entries_count =
|
|
|
|
static_cast<int>(head_end - head_start) / kEntrySizeInWords;
|
2011-11-09 13:48:43 +00:00
|
|
|
|
|
|
|
emergency_stack_ = new List<Entry>(2 * entries_count);
|
|
|
|
|
|
|
|
while (head_start != head_end) {
|
2011-11-09 15:40:08 +00:00
|
|
|
int size = static_cast<int>(*(head_start++));
|
2011-11-09 13:48:43 +00:00
|
|
|
HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
|
|
|
|
emergency_stack_->Add(Entry(obj, size));
|
|
|
|
}
|
|
|
|
rear_ = head_end;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-04-17 10:37:41 +00:00
|
|
|
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
|
|
|
|
public:
|
|
|
|
explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
|
|
|
|
|
|
|
|
virtual Object* RetainAs(Object* object) {
|
|
|
|
if (!heap_->InFromSpace(object)) {
|
|
|
|
return object;
|
|
|
|
}
|
|
|
|
|
|
|
|
MapWord map_word = HeapObject::cast(object)->map_word();
|
|
|
|
if (map_word.IsForwardingAddress()) {
|
|
|
|
return map_word.ToForwardingAddress();
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
Heap* heap_;
|
|
|
|
};
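// Editor's note (illustrative addition, not part of the original source): a
// hypothetical caller-side sketch of the three cases RetainAs() above
// distinguishes:
//
//   ScavengeWeakObjectRetainer retainer(heap);
//   Object* kept = retainer.RetainAs(obj);
//   // obj not in from-space            -> kept == obj (untouched by GC)
//   // obj copied (forwarding address)  -> kept == the new location
//   // obj dead (no forwarding address) -> kept == NULL, link is dropped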
|
|
|
|
|
|
|
|
|
2009-05-14 08:55:34 +00:00
|
|
|
void Heap::Scavenge() {
|
2013-04-26 07:35:07 +00:00
|
|
|
RelocationLock relocation_lock(this);
|
|
|
|
|
2012-10-12 11:41:14 +00:00
|
|
|
#ifdef VERIFY_HEAP
|
2011-10-25 13:27:46 +00:00
|
|
|
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
|
2008-07-03 15:10:15 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
gc_state_ = SCAVENGE;
|
|
|
|
|
|
|
|
// Implements Cheney's copying algorithm
|
2011-03-18 20:35:07 +00:00
|
|
|
LOG(isolate_, ResourceEvent("scavenge", "begin"));
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-06-22 14:29:35 +00:00
|
|
|
// Clear descriptor cache.
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->descriptor_lookup_cache()->Clear();
|
2009-06-22 14:29:35 +00:00
|
|
|
|
2009-06-12 11:11:04 +00:00
|
|
|
// Used for updating survived_since_last_expansion_ at function end.
|
2012-01-25 13:46:42 +00:00
|
|
|
intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
|
2009-06-12 11:11:04 +00:00
|
|
|
|
2010-04-14 07:26:20 +00:00
|
|
|
CheckNewSpaceExpansionCriteria();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
SelectScavengingVisitorsTable();
|
|
|
|
|
|
|
|
incremental_marking()->PrepareForScavenge();
|
|
|
|
|
2013-01-30 12:19:32 +00:00
|
|
|
paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
|
|
|
|
paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
|
2011-09-19 18:36:47 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Flip the semispaces. After flipping, to space is empty, from space has
|
|
|
|
// live objects.
|
2008-10-17 09:13:27 +00:00
|
|
|
new_space_.Flip();
|
|
|
|
new_space_.ResetAllocationInfo();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-05-14 08:55:34 +00:00
|
|
|
// We need to sweep newly copied objects which can be either in the
|
|
|
|
// to space or promoted to the old generation. For to-space
|
|
|
|
// objects, we treat the bottom of the to space as a queue. Newly
|
|
|
|
// copied and unswept objects lie between a 'front' mark and the
|
|
|
|
// allocation pointer.
|
2008-07-03 15:10:15 +00:00
|
|
|
//
|
2009-05-14 08:55:34 +00:00
|
|
|
// Promoted objects can go into various old-generation spaces, and
|
|
|
|
// can be allocated internally in the spaces (from the free list).
|
|
|
|
// We treat the top of the to space as a queue of addresses of
|
|
|
|
// promoted objects. The addresses of newly promoted and unswept
|
|
|
|
// objects lie between a 'front' mark and a 'rear' mark that is
|
|
|
|
// updated as a side effect of promoting an object.
|
|
|
|
//
|
|
|
|
// There is guaranteed to be enough room at the top of the to space
|
|
|
|
// for the addresses of promoted objects: every object promoted
|
|
|
|
// frees up its size in bytes from the top of the new space, and
|
|
|
|
// objects are at least one pointer in size.
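// Editor's note (illustrative addition, not part of the original source): a
// sketch of the to-space layout implied by the two queues described above,
// treating to space as one contiguous range for simplicity:
//
//   ToSpaceStart()                                           ToSpaceEnd()
//   |--- swept copies ---|--- unswept copies ---|...free...|-- promotion --|
//                        ^                      ^              queue
//                        new_space_front        allocation top
//
// DoScavenge() advances new_space_front until it catches up with the
// allocation top, draining the promotion queue along the way.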
|
2011-09-19 18:36:47 +00:00
|
|
|
Address new_space_front = new_space_.ToSpaceStart();
|
2011-11-09 13:48:43 +00:00
|
|
|
promotion_queue_.Initialize();
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
#ifdef DEBUG
|
|
|
|
store_buffer()->Clean();
|
|
|
|
#endif
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
ScavengeVisitor scavenge_visitor(this);
|
2008-07-03 15:10:15 +00:00
|
|
|
// Copy roots.
|
2009-12-09 14:32:45 +00:00
|
|
|
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
|
2009-05-14 08:55:34 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Copy objects reachable from the old generation.
|
|
|
|
{
|
|
|
|
StoreBufferRebuildScope scope(this,
|
|
|
|
store_buffer(),
|
|
|
|
&ScavengeStoreBufferCallback);
|
|
|
|
store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2013-06-12 15:03:44 +00:00
|
|
|
// Copy objects reachable from simple cells by scavenging cell values
|
|
|
|
// directly.
|
2009-07-09 13:28:22 +00:00
|
|
|
HeapObjectIterator cell_iterator(cell_space_);
|
2012-07-31 09:25:23 +00:00
|
|
|
for (HeapObject* heap_object = cell_iterator.Next();
|
|
|
|
heap_object != NULL;
|
|
|
|
heap_object = cell_iterator.Next()) {
|
2013-06-12 15:03:44 +00:00
|
|
|
if (heap_object->IsCell()) {
|
|
|
|
Cell* cell = Cell::cast(heap_object);
|
|
|
|
Address value_address = cell->ValueAddress();
|
|
|
|
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy objects reachable from global property cells by scavenging global
|
|
|
|
// property cell values directly.
|
|
|
|
HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
|
|
|
|
for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
|
|
|
|
heap_object != NULL;
|
|
|
|
heap_object = js_global_property_cell_iterator.Next()) {
|
2013-06-14 16:06:12 +00:00
|
|
|
if (heap_object->IsPropertyCell()) {
|
|
|
|
PropertyCell* cell = PropertyCell::cast(heap_object);
|
2012-07-31 09:25:23 +00:00
|
|
|
Address value_address = cell->ValueAddress();
|
2009-07-09 13:28:22 +00:00
|
|
|
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
|
2013-06-12 15:03:44 +00:00
|
|
|
Address type_address = cell->TypeAddress();
|
|
|
|
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
|
2009-07-09 13:28:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-26 09:44:34 +00:00
|
|
|
// Copy objects reachable from the code flushing candidates list.
|
|
|
|
MarkCompactCollector* collector = mark_compact_collector();
|
|
|
|
if (collector->is_code_flushing_enabled()) {
|
|
|
|
collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
|
|
|
|
}
|
|
|
|
|
2012-08-17 09:03:08 +00:00
|
|
|
// Scavenge the object reachable from the native contexts list directly.
|
|
|
|
scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
|
2010-10-18 14:59:03 +00:00
|
|
|
|
2009-12-09 14:32:45 +00:00
|
|
|
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
|
2012-11-06 17:32:15 +00:00
|
|
|
|
2012-12-04 10:23:43 +00:00
|
|
|
while (isolate()->global_handles()->IterateObjectGroups(
|
|
|
|
&scavenge_visitor, &IsUnscavengedHeapObject)) {
|
2012-11-06 17:32:15 +00:00
|
|
|
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
|
|
|
|
}
|
|
|
|
isolate()->global_handles()->RemoveObjectGroups();
|
2013-01-24 07:54:40 +00:00
|
|
|
isolate()->global_handles()->RemoveImplicitRefGroups();
|
2012-11-06 17:32:15 +00:00
|
|
|
|
2011-06-06 15:23:04 +00:00
|
|
|
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
|
2011-05-17 12:18:19 +00:00
|
|
|
&IsUnscavengedHeapObject);
|
2011-06-06 15:23:04 +00:00
|
|
|
isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
|
|
|
|
&scavenge_visitor);
|
2011-05-17 12:18:19 +00:00
|
|
|
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
|
|
|
|
|
2010-04-14 07:26:20 +00:00
|
|
|
UpdateNewSpaceReferencesInExternalStringTable(
|
|
|
|
&UpdateNewSpaceReferenceInExternalStringTableEntry);
|
|
|
|
|
2013-01-14 13:19:27 +00:00
|
|
|
error_object_list_.UpdateReferencesInNewSpace(this);
|
|
|
|
|
2011-11-09 13:48:43 +00:00
|
|
|
promotion_queue_.Destroy();
|
|
|
|
|
2012-02-09 13:30:01 +00:00
|
|
|
if (!FLAG_watch_ic_patching) {
|
2012-02-09 10:19:46 +00:00
|
|
|
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
|
|
|
|
}
|
2011-09-19 18:36:47 +00:00
|
|
|
incremental_marking()->UpdateMarkingDequeAfterScavenge();
|
2011-01-25 12:35:06 +00:00
|
|
|
|
2012-04-17 10:37:41 +00:00
|
|
|
ScavengeWeakObjectRetainer weak_object_retainer(this);
|
|
|
|
ProcessWeakReferences(&weak_object_retainer);
|
|
|
|
|
2009-12-09 14:32:45 +00:00
|
|
|
ASSERT(new_space_front == new_space_.top());
|
|
|
|
|
|
|
|
// Set age mark.
|
|
|
|
new_space_.set_age_mark(new_space_.top());
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
new_space_.LowerInlineAllocationLimit(
|
|
|
|
new_space_.inline_allocation_limit_step());
|
|
|
|
|
2009-12-09 14:32:45 +00:00
|
|
|
// Update how much has survived scavenge.
|
2010-09-30 07:22:53 +00:00
|
|
|
IncrementYoungSurvivorsCounter(static_cast<int>(
|
2012-01-25 13:46:42 +00:00
|
|
|
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
|
2009-12-09 14:32:45 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
LOG(isolate_, ResourceEvent("scavenge", "end"));
|
2009-12-09 14:32:45 +00:00
|
|
|
|
|
|
|
gc_state_ = NOT_IN_GC;
|
2011-11-30 11:13:36 +00:00
|
|
|
|
|
|
|
scavenges_since_last_idle_round_++;
|
2009-12-09 14:32:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
|
|
|
|
Object** p) {
|
2010-04-14 07:26:20 +00:00
|
|
|
MapWord first_word = HeapObject::cast(*p)->map_word();
|
|
|
|
|
|
|
|
if (!first_word.IsForwardingAddress()) {
|
|
|
|
// Unreachable external string can be finalized.
|
2011-03-18 20:35:07 +00:00
|
|
|
heap->FinalizeExternalString(String::cast(*p));
|
2010-04-14 07:26:20 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
// String is still reachable.
|
|
|
|
return String::cast(first_word.ToForwardingAddress());
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
|
|
|
|
ExternalStringTableUpdaterCallback updater_func) {
|
2012-10-12 11:41:14 +00:00
|
|
|
#ifdef VERIFY_HEAP
|
2011-10-25 13:27:46 +00:00
|
|
|
if (FLAG_verify_heap) {
|
|
|
|
external_string_table_.Verify();
|
|
|
|
}
|
2012-10-12 11:41:14 +00:00
|
|
|
#endif
|
2009-12-09 14:32:45 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
if (external_string_table_.new_space_strings_.is_empty()) return;
|
2009-12-09 14:32:45 +00:00
|
|
|
|
2012-10-04 11:09:17 +00:00
|
|
|
Object** start = &external_string_table_.new_space_strings_[0];
|
|
|
|
Object** end = start + external_string_table_.new_space_strings_.length();
|
|
|
|
Object** last = start;
|
2009-12-09 14:32:45 +00:00
|
|
|
|
2012-10-04 11:09:17 +00:00
|
|
|
for (Object** p = start; p < end; ++p) {
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(InFromSpace(*p));
|
|
|
|
String* target = updater_func(this, p);
|
2009-12-09 14:32:45 +00:00
|
|
|
|
2010-04-14 07:26:20 +00:00
|
|
|
if (target == NULL) continue;
|
2009-12-09 14:32:45 +00:00
|
|
|
|
|
|
|
ASSERT(target->IsExternalString());
|
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
if (InNewSpace(target)) {
|
2009-12-09 14:32:45 +00:00
|
|
|
// String is still in new space. Update the table entry.
|
|
|
|
*last = target;
|
|
|
|
++last;
|
|
|
|
} else {
|
|
|
|
// String got promoted. Move it to the old string list.
|
2011-03-18 20:35:07 +00:00
|
|
|
external_string_table_.AddOldString(target);
|
2009-12-09 14:32:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-04 11:09:17 +00:00
|
|
|
ASSERT(last <= end);
|
|
|
|
external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
|
2009-12-09 14:32:45 +00:00
|
|
|
}
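// Editor's note (illustrative addition, not part of the original source): the
// loop above is the usual in-place filter idiom -- 'last' is a write cursor
// that advances only for entries still in new space, and the list is then
// truncated to the surviving prefix. A minimal standalone sketch of the same
// idiom, with hypothetical names:
//
//   int last = 0;
//   for (int i = 0; i < entries.length(); i++) {
//     if (StillInNewSpace(entries[i])) entries[last++] = entries[i];
//   }
//   entries.Rewind(last);  // keep only the first 'last' entries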
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void Heap::UpdateReferencesInExternalStringTable(
|
|
|
|
ExternalStringTableUpdaterCallback updater_func) {
|
|
|
|
|
|
|
|
// Update old space string references.
|
|
|
|
if (external_string_table_.old_space_strings_.length() > 0) {
|
2012-10-04 11:09:17 +00:00
|
|
|
Object** start = &external_string_table_.old_space_strings_[0];
|
|
|
|
Object** end = start + external_string_table_.old_space_strings_.length();
|
|
|
|
for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
|
2011-09-19 18:36:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
UpdateNewSpaceReferencesInExternalStringTable(updater_func);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
template <class T>
|
|
|
|
struct WeakListVisitor;
|
|
|
|
|
|
|
|
|
|
|
|
template <class T>
|
|
|
|
static Object* VisitWeakList(Heap* heap,
|
|
|
|
Object* list,
|
|
|
|
WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {
|
2011-09-19 18:36:47 +00:00
|
|
|
Object* undefined = heap->undefined_value();
|
|
|
|
Object* head = undefined;
|
2013-06-12 09:31:39 +00:00
|
|
|
T* tail = NULL;
|
|
|
|
MarkCompactCollector* collector = heap->mark_compact_collector();
|
|
|
|
while (list != undefined) {
|
2010-12-07 11:31:57 +00:00
|
|
|
// Check whether to keep the candidate in the list.
|
2013-06-12 09:31:39 +00:00
|
|
|
T* candidate = reinterpret_cast<T*>(list);
|
|
|
|
Object* retained = retainer->RetainAs(list);
|
|
|
|
if (retained != NULL) {
|
2011-09-19 18:36:47 +00:00
|
|
|
if (head == undefined) {
|
2010-12-07 11:31:57 +00:00
|
|
|
// First element in the list.
|
2013-06-12 09:31:39 +00:00
|
|
|
head = retained;
|
2010-12-07 11:31:57 +00:00
|
|
|
} else {
|
|
|
|
// Subsequent elements in the list.
|
|
|
|
ASSERT(tail != NULL);
|
2013-06-12 09:31:39 +00:00
|
|
|
WeakListVisitor<T>::SetWeakNext(tail, retained);
|
2012-04-17 10:37:41 +00:00
|
|
|
if (record_slots) {
|
2013-06-12 09:31:39 +00:00
|
|
|
Object** next_slot =
|
|
|
|
HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
|
|
|
|
collector->RecordSlot(next_slot, next_slot, retained);
|
2012-04-17 10:37:41 +00:00
|
|
|
}
|
2010-12-07 11:31:57 +00:00
|
|
|
}
|
2013-06-12 09:31:39 +00:00
|
|
|
// Retained object is new tail.
|
|
|
|
ASSERT(!retained->IsUndefined());
|
|
|
|
candidate = reinterpret_cast<T*>(retained);
|
|
|
|
tail = candidate;
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
// tail is a live object, visit it.
|
|
|
|
WeakListVisitor<T>::VisitLiveObject(
|
|
|
|
heap, tail, retainer, record_slots);
|
2013-06-19 11:53:30 +00:00
|
|
|
} else {
|
|
|
|
WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
|
2010-12-07 11:31:57 +00:00
|
|
|
}
|
2011-09-19 18:36:47 +00:00
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// Move to next element in the list.
|
2013-06-12 09:31:39 +00:00
|
|
|
list = WeakListVisitor<T>::WeakNext(candidate);
|
2010-12-07 11:31:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Terminate the list if there are one or more elements.
|
|
|
|
if (tail != NULL) {
|
2013-06-12 09:31:39 +00:00
|
|
|
WeakListVisitor<T>::SetWeakNext(tail, undefined);
|
2010-12-07 11:31:57 +00:00
|
|
|
}
|
|
|
|
return head;
|
|
|
|
}
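// Editor's note (descriptive addition, not part of the original source): as
// used above and specialized below, a WeakListVisitor<T> must provide the
// following static interface (signatures inferred from the call sites in
// VisitWeakList; 'SomeWeakType' is a placeholder):
//
//   template<> struct WeakListVisitor<SomeWeakType> {
//     static void SetWeakNext(SomeWeakType* obj, Object* next);
//     static Object* WeakNext(SomeWeakType* obj);
//     static int WeakNextOffset();
//     static void VisitLiveObject(Heap* heap, SomeWeakType* obj,
//                                 WeakObjectRetainer* retainer,
//                                 bool record_slots);
//     static void VisitPhantomObject(Heap* heap, SomeWeakType* obj);
//   };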
|
|
|
|
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
template<>
|
|
|
|
struct WeakListVisitor<JSFunction> {
|
|
|
|
static void SetWeakNext(JSFunction* function, Object* next) {
|
|
|
|
function->set_next_function_link(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
static Object* WeakNext(JSFunction* function) {
|
|
|
|
return function->next_function_link();
|
|
|
|
}
|
|
|
|
|
|
|
|
static int WeakNextOffset() {
|
|
|
|
return JSFunction::kNextFunctionLinkOffset;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void VisitLiveObject(Heap*, JSFunction*,
|
|
|
|
WeakObjectRetainer*, bool) {
|
|
|
|
}
|
2013-06-19 11:53:30 +00:00
|
|
|
|
|
|
|
static void VisitPhantomObject(Heap*, JSFunction*) {
|
|
|
|
}
|
2013-06-12 09:31:39 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
template<>
|
|
|
|
struct WeakListVisitor<Context> {
|
|
|
|
static void SetWeakNext(Context* context, Object* next) {
|
|
|
|
context->set(Context::NEXT_CONTEXT_LINK,
|
|
|
|
next,
|
|
|
|
UPDATE_WRITE_BARRIER);
|
|
|
|
}
|
|
|
|
|
|
|
|
static Object* WeakNext(Context* context) {
|
|
|
|
return context->get(Context::NEXT_CONTEXT_LINK);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void VisitLiveObject(Heap* heap,
|
|
|
|
Context* context,
|
|
|
|
WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {
|
|
|
|
// Process the weak list of optimized functions for the context.
|
|
|
|
Object* function_list_head =
|
|
|
|
VisitWeakList<JSFunction>(
|
|
|
|
heap,
|
|
|
|
context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
|
|
|
|
retainer,
|
|
|
|
record_slots);
|
|
|
|
context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
|
|
|
|
function_list_head,
|
|
|
|
UPDATE_WRITE_BARRIER);
|
|
|
|
if (record_slots) {
|
|
|
|
Object** optimized_functions =
|
|
|
|
HeapObject::RawField(
|
|
|
|
context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
|
|
|
|
heap->mark_compact_collector()->RecordSlot(
|
|
|
|
optimized_functions, optimized_functions, function_list_head);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-19 11:53:30 +00:00
|
|
|
static void VisitPhantomObject(Heap*, Context*) {
|
|
|
|
}
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
static int WeakNextOffset() {
|
|
|
|
return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-10-18 14:59:03 +00:00
|
|
|
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
|
2012-04-17 10:37:41 +00:00
|
|
|
// We don't record weak slots during marking or scavenges.
|
|
|
|
// Instead we do it once when we complete a mark-compact cycle.
|
|
|
|
// Note that the write barrier has no effect if we are already in the middle of
|
|
|
|
// a compacting mark-sweep cycle and have to record slots manually.
|
|
|
|
bool record_slots =
|
|
|
|
gc_state() == MARK_COMPACT &&
|
|
|
|
mark_compact_collector()->is_compacting();
|
2013-06-07 10:52:11 +00:00
|
|
|
ProcessArrayBuffers(retainer, record_slots);
|
|
|
|
ProcessNativeContexts(retainer, record_slots);
|
2013-07-17 11:50:24 +00:00
|
|
|
ProcessAllocationSites(retainer, record_slots);
|
2013-06-07 10:52:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {
|
2013-06-12 09:31:39 +00:00
|
|
|
Object* head =
|
|
|
|
VisitWeakList<Context>(
|
|
|
|
this, native_contexts_list(), retainer, record_slots);
|
2010-10-18 14:59:03 +00:00
|
|
|
// Update the head of the list of contexts.
|
2012-08-17 09:03:08 +00:00
|
|
|
native_contexts_list_ = head;
|
2010-10-18 14:59:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-07 10:52:11 +00:00
|
|
|
template<>
|
2013-06-21 13:02:38 +00:00
|
|
|
struct WeakListVisitor<JSArrayBufferView> {
|
|
|
|
static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
|
2013-06-07 10:52:11 +00:00
|
|
|
obj->set_weak_next(next);
|
|
|
|
}
|
|
|
|
|
2013-06-21 13:02:38 +00:00
|
|
|
static Object* WeakNext(JSArrayBufferView* obj) {
|
2013-06-07 10:52:11 +00:00
|
|
|
return obj->weak_next();
|
|
|
|
}
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
static void VisitLiveObject(Heap*,
|
2013-06-21 13:02:38 +00:00
|
|
|
JSArrayBufferView* obj,
|
2013-06-07 10:52:11 +00:00
|
|
|
WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {}
|
|
|
|
|
2013-06-21 13:02:38 +00:00
|
|
|
static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
|
2013-06-19 11:53:30 +00:00
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
static int WeakNextOffset() {
|
2013-06-21 13:02:38 +00:00
|
|
|
return JSArrayBufferView::kWeakNextOffset;
|
2013-06-12 09:31:39 +00:00
|
|
|
}
|
2013-06-07 10:52:11 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
template<>
|
|
|
|
struct WeakListVisitor<JSArrayBuffer> {
|
2013-06-12 09:31:39 +00:00
|
|
|
static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
|
2013-06-07 10:52:11 +00:00
|
|
|
obj->set_weak_next(next);
|
|
|
|
}
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
static Object* WeakNext(JSArrayBuffer* obj) {
|
2013-06-07 10:52:11 +00:00
|
|
|
return obj->weak_next();
|
|
|
|
}
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
static void VisitLiveObject(Heap* heap,
|
|
|
|
JSArrayBuffer* array_buffer,
|
2013-06-07 10:52:11 +00:00
|
|
|
WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {
|
|
|
|
Object* typed_array_obj =
|
2013-06-21 13:02:38 +00:00
|
|
|
VisitWeakList<JSArrayBufferView>(
|
2013-06-12 09:31:39 +00:00
|
|
|
heap,
|
2013-06-21 13:02:38 +00:00
|
|
|
array_buffer->weak_first_view(),
|
2013-06-12 09:31:39 +00:00
|
|
|
retainer, record_slots);
|
2013-06-21 13:02:38 +00:00
|
|
|
array_buffer->set_weak_first_view(typed_array_obj);
|
2013-06-12 09:31:39 +00:00
|
|
|
if (typed_array_obj != heap->undefined_value() && record_slots) {
|
2013-06-07 10:52:11 +00:00
|
|
|
Object** slot = HeapObject::RawField(
|
2013-06-21 13:02:38 +00:00
|
|
|
array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
|
2013-06-12 09:31:39 +00:00
|
|
|
heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
|
2013-06-07 10:52:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-19 11:53:30 +00:00
|
|
|
static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
|
|
|
|
Runtime::FreeArrayBuffer(heap->isolate(), phantom);
|
|
|
|
}
|
|
|
|
|
2013-06-12 09:31:39 +00:00
|
|
|
static int WeakNextOffset() {
|
|
|
|
return JSArrayBuffer::kWeakNextOffset;
|
|
|
|
}
|
2013-06-07 10:52:11 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {
|
|
|
|
Object* array_buffer_obj =
|
2013-06-12 09:31:39 +00:00
|
|
|
VisitWeakList<JSArrayBuffer>(this,
|
|
|
|
array_buffers_list(),
|
2013-06-07 10:52:11 +00:00
|
|
|
retainer, record_slots);
|
|
|
|
set_array_buffers_list(array_buffer_obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-19 11:53:30 +00:00
|
|
|
void Heap::TearDownArrayBuffers() {
|
|
|
|
Object* undefined = undefined_value();
|
|
|
|
for (Object* o = array_buffers_list(); o != undefined;) {
|
|
|
|
JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
|
|
|
|
Runtime::FreeArrayBuffer(isolate(), buffer);
|
|
|
|
o = buffer->weak_next();
|
|
|
|
}
|
|
|
|
array_buffers_list_ = undefined;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-07-17 11:50:24 +00:00
|
|
|
template<>
|
|
|
|
struct WeakListVisitor<AllocationSite> {
|
|
|
|
static void SetWeakNext(AllocationSite* obj, Object* next) {
|
|
|
|
obj->set_weak_next(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
static Object* WeakNext(AllocationSite* obj) {
|
|
|
|
return obj->weak_next();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void VisitLiveObject(Heap* heap,
|
|
|
|
AllocationSite* site,
|
|
|
|
WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {}
|
|
|
|
|
|
|
|
static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
|
|
|
|
|
|
|
|
static int WeakNextOffset() {
|
|
|
|
return AllocationSite::kWeakNextOffset;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
|
|
|
|
bool record_slots) {
|
|
|
|
Object* allocation_site_obj =
|
|
|
|
VisitWeakList<AllocationSite>(this,
|
|
|
|
allocation_sites_list(),
|
|
|
|
retainer, record_slots);
|
|
|
|
set_allocation_sites_list(allocation_site_obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-01-13 15:14:45 +00:00
|
|
|
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
|
2013-06-03 15:32:22 +00:00
|
|
|
DisallowHeapAllocation no_allocation;
|
2012-01-13 15:14:45 +00:00
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
// Both the external string table and the string table may contain
|
2012-11-02 12:45:00 +00:00
|
|
|
// external strings, but neither lists them exhaustively, nor is the
|
|
|
|
// intersection set empty. Therefore we iterate over the external string
|
2013-02-28 17:03:34 +00:00
|
|
|
// table first, ignoring internalized strings, and then over the
|
|
|
|
// internalized string table.
|
2012-11-02 12:45:00 +00:00
|
|
|
|
|
|
|
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
|
|
|
|
public:
|
|
|
|
explicit ExternalStringTableVisitorAdapter(
|
|
|
|
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
|
|
|
|
virtual void VisitPointers(Object** start, Object** end) {
|
|
|
|
for (Object** p = start; p < end; p++) {
|
2013-02-28 17:03:34 +00:00
|
|
|
// Visit non-internalized external strings,
|
|
|
|
// since internalized strings are listed in the string table.
|
|
|
|
if (!(*p)->IsInternalizedString()) {
|
2012-11-02 12:45:00 +00:00
|
|
|
ASSERT((*p)->IsExternalString());
|
|
|
|
visitor_->VisitExternalString(Utils::ToLocal(
|
|
|
|
Handle<String>(String::cast(*p))));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
private:
|
|
|
|
v8::ExternalResourceVisitor* visitor_;
|
|
|
|
} external_string_table_visitor(visitor);
|
|
|
|
|
|
|
|
external_string_table_.Iterate(&external_string_table_visitor);
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
class StringTableVisitorAdapter : public ObjectVisitor {
|
2012-01-13 15:14:45 +00:00
|
|
|
public:
|
2013-02-28 17:03:34 +00:00
|
|
|
explicit StringTableVisitorAdapter(
|
2012-11-02 12:45:00 +00:00
|
|
|
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
|
2012-01-13 15:14:45 +00:00
|
|
|
virtual void VisitPointers(Object** start, Object** end) {
|
|
|
|
for (Object** p = start; p < end; p++) {
|
|
|
|
if ((*p)->IsExternalString()) {
|
2013-02-28 17:03:34 +00:00
|
|
|
ASSERT((*p)->IsInternalizedString());
|
2012-01-13 15:14:45 +00:00
|
|
|
visitor_->VisitExternalString(Utils::ToLocal(
|
|
|
|
Handle<String>(String::cast(*p))));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
private:
|
|
|
|
v8::ExternalResourceVisitor* visitor_;
|
2013-02-28 17:03:34 +00:00
|
|
|
} string_table_visitor(visitor);
|
2012-11-02 12:45:00 +00:00
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
string_table()->IterateElements(&string_table_visitor);
|
2012-01-13 15:14:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
|
|
|
|
public:
|
2011-03-18 20:35:07 +00:00
|
|
|
static inline void VisitPointer(Heap* heap, Object** p) {
|
2010-08-11 14:30:14 +00:00
|
|
|
Object* object = *p;
|
2011-03-18 20:35:07 +00:00
|
|
|
if (!heap->InNewSpace(object)) return;
|
2010-08-11 14:30:14 +00:00
|
|
|
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
|
|
|
|
reinterpret_cast<HeapObject*>(object));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-12-09 14:32:45 +00:00
|
|
|
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
|
|
|
|
Address new_space_front) {
|
2009-05-14 08:55:34 +00:00
|
|
|
do {
|
2011-09-19 18:36:47 +00:00
|
|
|
SemiSpace::AssertValidRange(new_space_front, new_space_.top());
|
2009-05-14 08:55:34 +00:00
|
|
|
// The addresses new_space_front and new_space_.top() define a
|
|
|
|
// queue of unprocessed copied objects. Process them until the
|
|
|
|
// queue is empty.
|
2011-09-19 18:36:47 +00:00
|
|
|
while (new_space_front != new_space_.top()) {
|
|
|
|
if (!NewSpacePage::IsAtEnd(new_space_front)) {
|
|
|
|
HeapObject* object = HeapObject::FromAddress(new_space_front);
|
|
|
|
new_space_front +=
|
|
|
|
NewSpaceScavenger::IterateBody(object->map(), object);
|
|
|
|
} else {
|
|
|
|
new_space_front =
|
2012-02-23 12:11:24 +00:00
|
|
|
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
|
2011-09-19 18:36:47 +00:00
|
|
|
}
|
2009-05-14 08:55:34 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-05-15 15:02:03 +00:00
|
|
|
// Promote and process all the to-be-promoted objects.
|
2011-09-19 18:36:47 +00:00
|
|
|
{
|
|
|
|
StoreBufferRebuildScope scope(this,
|
|
|
|
store_buffer(),
|
|
|
|
&ScavengeStoreBufferCallback);
|
|
|
|
while (!promotion_queue()->is_empty()) {
|
|
|
|
HeapObject* target;
|
|
|
|
int size;
|
|
|
|
promotion_queue()->remove(&target, &size);
|
|
|
|
|
|
|
|
// A promoted object might already have been partially visited
|
|
|
|
// during old-space pointer iteration. Thus we search specifically
|
|
|
|
// for pointers into the from semispace instead of looking for pointers
|
|
|
|
// to new space.
|
|
|
|
ASSERT(!target->IsMap());
|
|
|
|
IterateAndMarkPointersToFromSpace(target->address(),
|
|
|
|
target->address() + size,
|
|
|
|
&ScavengeObject);
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
2009-05-14 08:55:34 +00:00
|
|
|
// Take another spin if there are now unswept objects in new space
|
|
|
|
// (there are currently no more unswept promoted objects).
|
2011-09-19 18:36:47 +00:00
|
|
|
} while (new_space_front != new_space_.top());
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-12-09 14:32:45 +00:00
|
|
|
return new_space_front;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
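// Editor's note (descriptive addition, not part of the original source):
// DoScavenge() drains two work lists until both are empty -- the unswept
// region of to space (bounded by new_space_front and the allocation top,
// which keeps moving as objects are copied) and the promotion queue. Since
// sweeping a promoted object can copy more objects into to space, and
// sweeping a to-space object can promote more objects, the outer do/while
// loop repeats until a pass ends with new_space_front == new_space_.top().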
|
|
|
|
|
|
|
|
|
2012-04-17 07:52:39 +00:00
|
|
|
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
|
|
|
|
|
|
|
|
|
|
|
|
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
|
|
|
|
HeapObject* object,
|
|
|
|
int size));
|
|
|
|
|
|
|
|
static HeapObject* EnsureDoubleAligned(Heap* heap,
|
|
|
|
HeapObject* object,
|
|
|
|
int size) {
|
|
|
|
if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
|
|
|
|
heap->CreateFillerObjectAt(object->address(), kPointerSize);
|
|
|
|
return HeapObject::FromAddress(object->address() + kPointerSize);
|
|
|
|
} else {
|
|
|
|
heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
|
|
|
|
kPointerSize);
|
|
|
|
return object;
|
|
|
|
}
|
|
|
|
}
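// Editor's note (illustrative addition, not part of the original source): a
// worked example of the fix-up above for a 32-bit heap, assuming
// kPointerSize == 4 and kDoubleAlignmentMask == 7. The caller reserves the
// object size plus one extra word; if the raw address is not 8-byte aligned
// the filler goes in front and the object shifts up by one word, otherwise
// the spare word at the end becomes the filler:
//
//   raw address 0x1004, size 16 + 4  ->  filler at 0x1004, object at 0x1008
//   raw address 0x1008, size 16 + 4  ->  object at 0x1008, filler at 0x1018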
|
|
|
|
|
|
|
|
|
2011-03-30 15:17:10 +00:00
|
|
|
enum LoggingAndProfiling {
|
|
|
|
LOGGING_AND_PROFILING_ENABLED,
|
|
|
|
LOGGING_AND_PROFILING_DISABLED
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
|
2011-03-30 15:17:10 +00:00
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
template<MarksHandling marks_handling,
|
|
|
|
LoggingAndProfiling logging_and_profiling_mode>
|
2010-08-11 14:30:14 +00:00
|
|
|
class ScavengingVisitor : public StaticVisitorBase {
|
|
|
|
public:
|
|
|
|
static void Initialize() {
|
2012-11-15 13:31:27 +00:00
|
|
|
table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
|
2010-08-11 14:30:14 +00:00
|
|
|
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
|
|
|
|
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
|
|
|
|
table_.Register(kVisitByteArray, &EvacuateByteArray);
|
|
|
|
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
|
2011-06-09 10:03:35 +00:00
|
|
|
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
|
2011-03-30 15:17:10 +00:00
|
|
|
|
2012-08-17 09:03:08 +00:00
|
|
|
table_.Register(kVisitNativeContext,
|
2010-10-18 14:59:03 +00:00
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
2011-03-30 15:17:10 +00:00
|
|
|
template VisitSpecialized<Context::kSize>);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
table_.Register(kVisitConsString,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
2011-03-30 15:17:10 +00:00
|
|
|
template VisitSpecialized<ConsString::kSize>);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-08-26 13:03:30 +00:00
|
|
|
table_.Register(kVisitSlicedString,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
template VisitSpecialized<SlicedString::kSize>);
|
|
|
|
|
2013-03-22 16:51:28 +00:00
|
|
|
table_.Register(kVisitSymbol,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
template VisitSpecialized<Symbol::kSize>);
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
table_.Register(kVisitSharedFunctionInfo,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
2011-03-30 15:17:10 +00:00
|
|
|
template VisitSpecialized<SharedFunctionInfo::kSize>);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-08-03 12:48:30 +00:00
|
|
|
table_.Register(kVisitJSWeakMap,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
2013-06-07 10:52:11 +00:00
|
|
|
Visit);
|
|
|
|
|
|
|
|
table_.Register(kVisitJSArrayBuffer,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
Visit);
|
|
|
|
|
|
|
|
table_.Register(kVisitJSTypedArray,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
2011-08-03 12:48:30 +00:00
|
|
|
Visit);
|
|
|
|
|
2013-06-21 13:02:38 +00:00
|
|
|
table_.Register(kVisitJSDataView,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
Visit);
|
|
|
|
|
2011-07-05 06:19:53 +00:00
|
|
|
table_.Register(kVisitJSRegExp,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
Visit);
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
if (marks_handling == IGNORE_MARKS) {
|
|
|
|
table_.Register(kVisitJSFunction,
|
|
|
|
&ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
template VisitSpecialized<JSFunction::kSize>);
|
|
|
|
} else {
|
|
|
|
table_.Register(kVisitJSFunction, &EvacuateJSFunction);
|
|
|
|
}
|
2010-08-17 11:44:01 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
|
|
|
|
kVisitDataObject,
|
|
|
|
kVisitDataObjectGeneric>();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
|
|
|
|
kVisitJSObject,
|
|
|
|
kVisitJSObjectGeneric>();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
|
|
|
|
kVisitStruct,
|
|
|
|
kVisitStructGeneric>();
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-03-30 15:17:10 +00:00
|
|
|
static VisitorDispatchTable<ScavengingCallback>* GetTable() {
|
|
|
|
return &table_;
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
private:
|
|
|
|
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
|
|
|
|
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
|
2010-08-11 14:30:14 +00:00
|
|
|
bool should_record = false;
|
|
|
|
#ifdef DEBUG
|
|
|
|
should_record = FLAG_heap_stats;
|
|
|
|
#endif
|
|
|
|
should_record = should_record || FLAG_log_gc;
|
|
|
|
if (should_record) {
|
2011-03-18 20:35:07 +00:00
|
|
|
if (heap->new_space()->Contains(obj)) {
|
|
|
|
heap->new_space()->RecordAllocation(obj);
|
2010-07-13 08:05:10 +00:00
|
|
|
} else {
|
2011-03-18 20:35:07 +00:00
|
|
|
heap->new_space()->RecordPromotion(obj);
|
2010-07-13 08:05:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-07-12 12:47:09 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
// Helper function used during evacuation to copy a source object to an
|
|
|
|
// allocated target object and update the forwarding pointer in the source
|
|
|
|
// object.
|
2011-11-29 10:02:38 +00:00
|
|
|
INLINE(static void MigrateObject(Heap* heap,
|
|
|
|
HeapObject* source,
|
|
|
|
HeapObject* target,
|
|
|
|
int size)) {
|
2010-08-11 14:30:14 +00:00
|
|
|
// Copy the content of source to target.
|
2011-03-18 20:35:07 +00:00
|
|
|
heap->CopyBlock(target->address(), source->address(), size);
|
2010-07-12 12:47:09 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
// Set the forwarding address.
|
|
|
|
source->set_map_word(MapWord::FromForwardingAddress(target));
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2011-03-30 15:17:10 +00:00
|
|
|
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
|
|
|
|
// Update NewSpace stats if necessary.
|
|
|
|
RecordCopiedObject(heap, target);
|
|
|
|
HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
|
|
|
|
Isolate* isolate = heap->isolate();
|
2012-08-28 14:43:28 +00:00
|
|
|
if (isolate->logger()->is_logging_code_events() ||
|
2013-04-02 07:53:50 +00:00
|
|
|
isolate->cpu_profiler()->is_profiling()) {
|
2011-03-30 15:17:10 +00:00
|
|
|
if (target->IsSharedFunctionInfo()) {
|
|
|
|
PROFILE(isolate, SharedFunctionInfoMoveEvent(
|
|
|
|
source->address(), target->address()));
|
|
|
|
}
|
2010-09-24 11:45:12 +00:00
|
|
|
}
|
2011-03-30 15:17:10 +00:00
|
|
|
}
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
if (marks_handling == TRANSFER_MARKS) {
|
|
|
|
if (Marking::TransferColor(source, target)) {
|
2012-01-18 09:21:07 +00:00
|
|
|
MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
|
2011-09-19 18:36:47 +00:00
|
|
|
}
|
|
|
|
}
|
2010-08-11 14:30:14 +00:00
|
|
|
}
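// Editor's note (illustrative addition, not part of the original source):
// after MigrateObject() the old copy's map word encodes the forwarding
// address; other slots that still point at the old copy are later resolved
// with the pattern used elsewhere in this file:
//
//   MapWord first_word = object->map_word();
//   if (first_word.IsForwardingAddress()) {
//     *slot = first_word.ToForwardingAddress();  // object was already moved
//   }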
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2012-04-17 07:52:39 +00:00
|
|
|
|
|
|
|
template<ObjectContents object_contents,
|
|
|
|
SizeRestriction size_restriction,
|
|
|
|
int alignment>
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline void EvacuateObject(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object,
|
|
|
|
int object_size) {
|
2011-10-13 11:50:00 +00:00
|
|
|
SLOW_ASSERT((size_restriction != SMALL) ||
|
2012-02-23 12:11:24 +00:00
|
|
|
(object_size <= Page::kMaxNonCodeHeapObjectSize));
|
2011-10-13 11:50:00 +00:00
|
|
|
SLOW_ASSERT(object->Size() == object_size);
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2012-04-17 07:52:39 +00:00
|
|
|
int allocation_size = object_size;
|
|
|
|
if (alignment != kObjectAlignment) {
|
|
|
|
ASSERT(alignment == kDoubleAlignment);
|
|
|
|
allocation_size += kPointerSize;
|
|
|
|
}
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
Heap* heap = map->GetHeap();
|
2011-03-18 20:35:07 +00:00
|
|
|
if (heap->ShouldBePromoted(object->address(), object_size)) {
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* maybe_result;
|
2010-07-12 12:47:09 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
if ((size_restriction != SMALL) &&
|
2012-04-17 07:52:39 +00:00
|
|
|
(allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
|
|
|
|
maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
|
2011-09-19 18:36:47 +00:00
|
|
|
NOT_EXECUTABLE);
|
2010-08-11 14:30:14 +00:00
|
|
|
} else {
|
|
|
|
if (object_contents == DATA_OBJECT) {
|
2012-04-17 07:52:39 +00:00
|
|
|
maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
} else {
|
2012-04-17 07:52:39 +00:00
|
|
|
maybe_result =
|
|
|
|
heap->old_pointer_space()->AllocateRaw(allocation_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result = NULL; // Initialization to please compiler.
|
|
|
|
if (maybe_result->ToObject(&result)) {
|
2010-08-11 14:30:14 +00:00
|
|
|
HeapObject* target = HeapObject::cast(result);
|
2011-11-29 10:02:38 +00:00
|
|
|
|
2012-04-17 07:52:39 +00:00
|
|
|
if (alignment != kObjectAlignment) {
|
|
|
|
target = EnsureDoubleAligned(heap, target, allocation_size);
|
|
|
|
}
|
|
|
|
|
2011-11-29 10:02:38 +00:00
|
|
|
// Order is important: slot might be inside of the target if target
|
|
|
|
// was allocated over a dead object and slot comes from the store
|
|
|
|
// buffer.
|
|
|
|
*slot = target;
|
|
|
|
MigrateObject(heap, object, target, object_size);
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
if (object_contents == POINTER_OBJECT) {
|
2012-04-17 10:37:41 +00:00
|
|
|
if (map->instance_type() == JS_FUNCTION_TYPE) {
|
|
|
|
heap->promotion_queue()->insert(
|
|
|
|
target, JSFunction::kNonWeakFieldsEndOffset);
|
|
|
|
} else {
|
|
|
|
heap->promotion_queue()->insert(target, object_size);
|
|
|
|
}
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
heap->tracer()->increment_promoted_objects_size(object_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
return;
|
2010-07-13 08:05:10 +00:00
|
|
|
}
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
2012-04-17 07:52:39 +00:00
|
|
|
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
|
2011-11-09 13:48:43 +00:00
|
|
|
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
|
2011-09-19 18:36:47 +00:00
|
|
|
Object* result = allocation->ToObjectUnchecked();
|
2011-11-29 10:02:38 +00:00
|
|
|
HeapObject* target = HeapObject::cast(result);
|
2011-09-19 18:36:47 +00:00
|
|
|
|
2012-04-17 07:52:39 +00:00
|
|
|
if (alignment != kObjectAlignment) {
|
|
|
|
target = EnsureDoubleAligned(heap, target, allocation_size);
|
|
|
|
}
|
|
|
|
|
2011-11-29 10:02:38 +00:00
|
|
|
// Order is important: slot might be inside of the target if target
|
|
|
|
// was allocated over a dead object and slot comes from the store
|
|
|
|
// buffer.
|
|
|
|
*slot = target;
|
|
|
|
MigrateObject(heap, object, target, object_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
return;
|
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
static inline void EvacuateJSFunction(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
ObjectEvacuationStrategy<POINTER_OBJECT>::
|
|
|
|
template VisitSpecialized<JSFunction::kSize>(map, slot, object);
|
|
|
|
|
|
|
|
HeapObject* target = *slot;
|
|
|
|
MarkBit mark_bit = Marking::MarkBitFrom(target);
|
|
|
|
if (Marking::IsBlack(mark_bit)) {
|
|
|
|
// This object is black and it might not be rescanned by marker.
|
|
|
|
// We should explicitly record code entry slot for compaction because
|
|
|
|
// promotion queue processing (IterateAndMarkPointersToFromSpace) will
|
|
|
|
// miss it as it is not HeapObject-tagged.
|
|
|
|
Address code_entry_slot =
|
|
|
|
target->address() + JSFunction::kCodeEntryOffset;
|
|
|
|
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
|
|
|
|
map->GetHeap()->mark_compact_collector()->
|
|
|
|
RecordCodeEntrySlot(code_entry_slot, code);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline void EvacuateFixedArray(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
|
2010-08-11 14:30:14 +00:00
|
|
|
slot,
|
|
|
|
object,
|
|
|
|
object_size);
|
2010-07-13 08:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-09 10:03:35 +00:00
|
|
|
static inline void EvacuateFixedDoubleArray(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
|
|
|
|
int object_size = FixedDoubleArray::SizeFor(length);
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
|
|
|
|
map,
|
|
|
|
slot,
|
|
|
|
object,
|
|
|
|
object_size);
|
2011-06-09 10:03:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline void EvacuateByteArray(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
|
|
|
|
map, slot, object, object_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
|
|
|
|
2012-11-15 13:31:27 +00:00
|
|
|
static inline void EvacuateSeqOneByteString(Map* map,
|
2010-07-13 08:05:10 +00:00
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
2012-11-15 13:31:27 +00:00
|
|
|
int object_size = SeqOneByteString::cast(object)->
|
|
|
|
SeqOneByteStringSize(map->instance_type());
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
|
|
|
|
map, slot, object, object_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline void EvacuateSeqTwoByteString(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
int object_size = SeqTwoByteString::cast(object)->
|
|
|
|
SeqTwoByteStringSize(map->instance_type());
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
|
|
|
|
map, slot, object, object_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline bool IsShortcutCandidate(int type) {
|
|
|
|
return ((type & kShortcutTypeMask) == kShortcutTypeTag);
|
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline void EvacuateShortcutCandidate(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
ASSERT(IsShortcutCandidate(map->instance_type()));
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
Heap* heap = map->GetHeap();
|
|
|
|
|
|
|
|
if (marks_handling == IGNORE_MARKS &&
|
|
|
|
ConsString::cast(object)->unchecked_second() ==
|
|
|
|
heap->empty_string()) {
|
2010-08-11 14:30:14 +00:00
|
|
|
HeapObject* first =
|
|
|
|
HeapObject::cast(ConsString::cast(object)->unchecked_first());
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
*slot = first;
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
if (!heap->InNewSpace(first)) {
|
2010-08-11 14:30:14 +00:00
|
|
|
object->set_map_word(MapWord::FromForwardingAddress(first));
|
|
|
|
return;
|
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
MapWord first_word = first->map_word();
|
|
|
|
if (first_word.IsForwardingAddress()) {
|
|
|
|
HeapObject* target = first_word.ToForwardingAddress();
|
|
|
|
|
|
|
|
*slot = target;
|
|
|
|
object->set_map_word(MapWord::FromForwardingAddress(target));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
heap->DoScavengeObject(first->map(), slot, first);
|
2010-08-11 14:30:14 +00:00
|
|
|
object->set_map_word(MapWord::FromForwardingAddress(*slot));
|
2008-07-03 15:10:15 +00:00
|
|
|
return;
|
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
int object_size = ConsString::kSize;
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
|
|
|
|
map, slot, object, object_size);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
template<ObjectContents object_contents>
|
|
|
|
class ObjectEvacuationStrategy {
|
|
|
|
public:
|
|
|
|
template<int object_size>
|
|
|
|
static inline void VisitSpecialized(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<object_contents, SMALL, kObjectAlignment>(
|
|
|
|
map, slot, object, object_size);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2010-08-11 14:30:14 +00:00
|
|
|
static inline void Visit(Map* map,
|
|
|
|
HeapObject** slot,
|
|
|
|
HeapObject* object) {
|
|
|
|
int object_size = map->instance_size();
|
2012-04-17 07:52:39 +00:00
|
|
|
EvacuateObject<object_contents, SMALL, kObjectAlignment>(
|
|
|
|
map, slot, object, object_size);
|
2010-08-11 14:30:14 +00:00
|
|
|
}
|
|
|
|
};
|
2010-07-13 08:05:10 +00:00
|
|
|
|
2011-03-30 15:17:10 +00:00
|
|
|
static VisitorDispatchTable<ScavengingCallback> table_;
|
2010-08-11 14:30:14 +00:00
|
|
|
};
|
2010-07-13 08:05:10 +00:00
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
template<MarksHandling marks_handling,
|
|
|
|
LoggingAndProfiling logging_and_profiling_mode>
|
2011-03-30 15:17:10 +00:00
|
|
|
VisitorDispatchTable<ScavengingCallback>
|
2011-09-19 18:36:47 +00:00
|
|
|
ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
|
2011-03-30 15:17:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
static void InitializeScavengingVisitorsTables() {
|
2011-09-19 18:36:47 +00:00
|
|
|
ScavengingVisitor<TRANSFER_MARKS,
|
|
|
|
LOGGING_AND_PROFILING_DISABLED>::Initialize();
|
|
|
|
ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
|
|
|
|
ScavengingVisitor<TRANSFER_MARKS,
|
|
|
|
LOGGING_AND_PROFILING_ENABLED>::Initialize();
|
|
|
|
ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
|
2011-03-30 15:17:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void Heap::SelectScavengingVisitorsTable() {
|
|
|
|
bool logging_and_profiling =
|
|
|
|
isolate()->logger()->is_logging() ||
|
2013-04-02 07:53:50 +00:00
|
|
|
isolate()->cpu_profiler()->is_profiling() ||
|
2011-03-30 15:17:10 +00:00
|
|
|
(isolate()->heap_profiler() != NULL &&
|
2011-09-19 18:36:47 +00:00
|
|
|
isolate()->heap_profiler()->is_profiling());
|
2011-03-30 15:17:10 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
if (!incremental_marking()->IsMarking()) {
|
|
|
|
if (!logging_and_profiling) {
|
|
|
|
scavenging_visitors_table_.CopyFrom(
|
|
|
|
ScavengingVisitor<IGNORE_MARKS,
|
|
|
|
LOGGING_AND_PROFILING_DISABLED>::GetTable());
|
|
|
|
} else {
|
|
|
|
scavenging_visitors_table_.CopyFrom(
|
|
|
|
ScavengingVisitor<IGNORE_MARKS,
|
|
|
|
LOGGING_AND_PROFILING_ENABLED>::GetTable());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (!logging_and_profiling) {
|
|
|
|
scavenging_visitors_table_.CopyFrom(
|
|
|
|
ScavengingVisitor<TRANSFER_MARKS,
|
|
|
|
LOGGING_AND_PROFILING_DISABLED>::GetTable());
|
|
|
|
} else {
|
|
|
|
scavenging_visitors_table_.CopyFrom(
|
|
|
|
ScavengingVisitor<TRANSFER_MARKS,
|
|
|
|
LOGGING_AND_PROFILING_ENABLED>::GetTable());
|
|
|
|
}
|
2011-10-10 10:35:08 +00:00
|
|
|
|
|
|
|
if (incremental_marking()->IsCompacting()) {
|
|
|
|
// When compacting, forbid short-circuiting of cons-strings.
|
|
|
|
// Scavenging code relies on the fact that a new-space object
|
|
|
|
// can't be evacuated into an evacuation candidate, but
|
|
|
|
// short-circuiting violates this assumption.
|
|
|
|
scavenging_visitors_table_.Register(
|
|
|
|
StaticVisitorBase::kVisitShortcutCandidate,
|
|
|
|
scavenging_visitors_table_.GetVisitorById(
|
|
|
|
StaticVisitorBase::kVisitConsString));
|
|
|
|
}
|
2011-03-30 15:17:10 +00:00
|
|
|
}
|
|
|
|
}
|
2010-07-13 08:05:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
|
2011-10-13 11:50:00 +00:00
|
|
|
SLOW_ASSERT(HEAP->InFromSpace(object));
|
2010-07-13 08:05:10 +00:00
|
|
|
MapWord first_word = object->map_word();
|
2011-10-13 11:50:00 +00:00
|
|
|
SLOW_ASSERT(!first_word.IsForwardingAddress());
|
2010-07-13 08:05:10 +00:00
|
|
|
Map* map = first_word.ToMap();
|
2011-09-19 18:36:47 +00:00
|
|
|
map->GetHeap()->DoScavengeObject(map, p, object);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  MaybeObject* maybe_result = AllocateRawMap();
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
                   Map::OwnsDescriptors::encode(true);
  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
  return result;
}


MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                               int instance_size,
                               ElementsKind elements_kind) {
  Object* result;
  MaybeObject* maybe_result = AllocateRawMap();
  if (!maybe_result->To(&result)) return maybe_result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map_no_write_barrier(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                          SKIP_WRITE_BARRIER);
  map->init_back_pointer(undefined_value());
  map->set_unused_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
                   Map::OwnsDescriptors::encode(true);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);

  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  CodeCache* code_cache;
  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
  }
  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}


MaybeObject* Heap::AllocateAccessorPair() {
  AccessorPair* accessors;
  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
  }
  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
  return accessors;
}


MaybeObject* Heap::AllocateTypeFeedbackInfo() {
  TypeFeedbackInfo* info;
  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
    if (!maybe_info->To(&info)) return maybe_info;
  }
  info->initialize_storage();
  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                SKIP_WRITE_BARRIER);
  return info;
}


MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
  AliasedArgumentsEntry* entry;
  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
    if (!maybe_entry->To(&entry)) return maybe_entry;
  }
  entry->set_aliased_context_slot(aliased_context_slot);
  return entry;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                     \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantStringTable Heap::constant_string_table[] = {
#define CONSTANT_STRING_ELEMENT(name, contents)                               \
  {contents, k##name##RootIndex},
  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


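// Bootstraps the core maps. The meta map, fixed array map and oddball map are
// allocated as partial maps first; once the empty fixed array and empty
// descriptor array exist, their remaining fields are fixed up below.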
bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  meta_map()->init_back_pointer(undefined_value());
  meta_map()->set_instance_descriptors(empty_descriptor_array());

  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_dependent_code(
      DependentCode::cast(empty_fixed_array()));
  fixed_array_map()->init_back_pointer(undefined_value());
  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());

  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  oddball_map()->init_back_pointer(undefined_value());
  oddball_map()->set_instance_descriptors(empty_descriptor_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_scope_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_symbol_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_free_space_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_strict_arguments_elements_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_byte_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateEmptyExternalArray(kExternalUnsignedByteArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_short_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
      kExternalUnsignedShortArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_unsigned_short_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_int_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateEmptyExternalArray(kExternalUnsignedIntArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_unsigned_int_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_float_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_double_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_pixel_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
                                         PropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_function_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_with_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_block_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_module_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* native_context_map = Map::cast(obj);
  native_context_map->set_dictionary_map(true);
  native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
  set_native_context_map(native_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  Map* external_map;
  { MaybeObject* maybe_obj =
        AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
    if (!maybe_obj->To(&external_map)) return false;
  }
  external_map->set_is_extensible(false);
  set_external_map(external_map);

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}


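// Allocates a HeapNumber; TENURED pretenuring places it in old data space,
// otherwise it goes into new space.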
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
  Cell::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocatePropertyCell(Object* value) {
  Object* result;
  MaybeObject* maybe_result = AllocateRawPropertyCell();
  if (!maybe_result->ToObject(&result)) return maybe_result;

  HeapObject::cast(result)->set_map_no_write_barrier(
      global_property_cell_map());
  PropertyCell* cell = PropertyCell::cast(result);
  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
  cell->set_value(value);
  cell->set_type(Type::None());
  maybe_result = cell->SetValueInferType(value);
  if (maybe_result->IsFailure()) return maybe_result;
  return result;
}


MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
  Box* result;
  MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
  if (!maybe_result->To(&result)) return maybe_result;
  result->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateAllocationSite() {
  Object* result;
  MaybeObject* maybe_result = Allocate(allocation_site_map(),
                                       OLD_POINTER_SPACE);
  if (!maybe_result->ToObject(&result)) return maybe_result;
  AllocationSite* site = AllocationSite::cast(result);
  site->Initialize();

  // Link the site onto the list of allocation sites.
  site->set_weak_next(allocation_sites_list());
  set_allocation_sites_list(site);
  return result;
}


MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use Smi-only elements optimizations for objects with the neander
  // map. There are too many cases where element values are set directly with a
  // bottleneck to trap the Smi-only -> fast elements transition, and there
  // appears to be no benefit in optimizing this case.
  Map* new_neander_map = Map::cast(obj);
  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
  set_neander_map(new_neander_map);

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode(isolate()));
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode(isolate()));
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope(isolate());
  // gcc-4.4 has problems generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, use separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime(isolate());
}


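// Creates the initial non-map objects: oddballs, the string table, the
// number-string and regexp results caches, fixed stubs, and the lookup
// caches used during bootstrapping.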
bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(HeapNumber::cast(obj));
  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(HeapNumber::cast(obj));

  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_infinity_value(HeapNumber::cast(obj));

  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the string table, so let's make that Smi zero.
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));

  // Allocate the initial string table.
  { MaybeObject* maybe_obj =
        StringTable::Allocate(this, kInitialStringTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_string_table() due to asserts.
  roots_[kStringTableRootIndex] = obj;

  // Finish initializing oddballs after creating the string table.
  { MaybeObject* maybe_obj =
        undefined_value()->Initialize("undefined",
                                      nan_value(),
                                      Oddball::kUndefined);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  // Initialize the null_value.
  { MaybeObject* maybe_obj =
        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("uninitialized",
                                           Smi::FromInt(-1),
                                           Oddball::kUninitialized);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_uninitialized_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
    { MaybeObject* maybe_obj =
          InternalizeUtf8String(constant_string_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_string_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden string which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_string is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
      OneByteVector("", 0), String::kEmptyStringHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_string_ = String::cast(obj);

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(UnseededNumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj =
        NameDictionary::Allocate(this, Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(NameDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_number_string_cache(FixedArray::cast(obj));

  // Allocate cache for single character one byte strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate cache for string split.
  { MaybeObject* maybe_obj = AllocateFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_string_split_cache(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_regexp_multiple_cache(FixedArray::cast(obj));

  // Allocate cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Allocate object to hold object observation state.
  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_observation_state(JSObject::cast(obj));

  { MaybeObject* maybe_obj = AllocateSymbol();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_frozen_symbol(Symbol::cast(obj));

  { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
  set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocateSymbol();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_observed_symbol(Symbol::cast(obj));

  // Handling of script id generation is in Factory::NewScript.
  set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}


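// Roots listed below are caches, stack limits and deopt PC offsets that may
// legitimately be overwritten after the heap has been initialized.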
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
  RootListIndex writable_roots[] = {
    kStoreBufferTopRootIndex,
    kStackLimitRootIndex,
    kNumberStringCacheRootIndex,
    kInstanceofCacheFunctionRootIndex,
    kInstanceofCacheMapRootIndex,
    kInstanceofCacheAnswerRootIndex,
    kCodeStubsRootIndex,
    kNonMonomorphicCacheRootIndex,
    kPolymorphicCodeCacheRootIndex,
    kLastScriptIdRootIndex,
    kEmptyScriptRootIndex,
    kRealStackLimitRootIndex,
    kArgumentsAdaptorDeoptPCOffsetRootIndex,
    kConstructStubDeoptPCOffsetRootIndex,
    kGetterStubDeoptPCOffsetRootIndex,
    kSetterStubDeoptPCOffsetRootIndex,
    kStringTableRootIndex,
  };

  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
    if (root_index == writable_roots[i])
      return true;
  }
  return false;
}


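// The regexp results cache is a flat FixedArray probed at two slots derived
// from the key string's hash; only internalized strings are used as keys, and
// a miss is signalled by Smi zero.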
Object* RegExpResultsCache::Lookup(Heap* heap,
                                   String* key_string,
                                   Object* key_pattern,
                                   ResultsCacheType type) {
  FixedArray* cache;
  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
  if (type == STRING_SPLIT_SUBSTRINGS) {
    ASSERT(key_pattern->IsString());
    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
    cache = heap->string_split_cache();
  } else {
    ASSERT(type == REGEXP_MULTIPLE_INDICES);
    ASSERT(key_pattern->IsFixedArray());
    cache = heap->regexp_multiple_cache();
  }

  uint32_t hash = key_string->Hash();
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
      ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == key_string &&
      cache->get(index + kPatternOffset) == key_pattern) {
    return cache->get(index + kArrayOffset);
  }
  index =
      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
  if (cache->get(index + kStringOffset) == key_string &&
      cache->get(index + kPatternOffset) == key_pattern) {
    return cache->get(index + kArrayOffset);
  }
  return Smi::FromInt(0);
}


void RegExpResultsCache::Enter(Heap* heap,
|
|
|
|
String* key_string,
|
|
|
|
Object* key_pattern,
|
|
|
|
FixedArray* value_array,
|
|
|
|
ResultsCacheType type) {
|
|
|
|
FixedArray* cache;
|
2013-02-28 17:03:34 +00:00
|
|
|
if (!key_string->IsInternalizedString()) return;
|
2012-08-31 09:28:01 +00:00
|
|
|
if (type == STRING_SPLIT_SUBSTRINGS) {
|
|
|
|
ASSERT(key_pattern->IsString());
|
2013-02-28 17:03:34 +00:00
|
|
|
if (!key_pattern->IsInternalizedString()) return;
|
2012-08-31 09:28:01 +00:00
|
|
|
cache = heap->string_split_cache();
|
|
|
|
} else {
|
|
|
|
ASSERT(type == REGEXP_MULTIPLE_INDICES);
|
|
|
|
ASSERT(key_pattern->IsFixedArray());
|
|
|
|
cache = heap->regexp_multiple_cache();
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t hash = key_string->Hash();
|
|
|
|
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
|
2011-09-07 11:28:48 +00:00
|
|
|
~(kArrayEntriesPerCacheEntry - 1));
|
|
|
|
if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
|
2012-08-31 09:28:01 +00:00
|
|
|
cache->set(index + kStringOffset, key_string);
|
|
|
|
cache->set(index + kPatternOffset, key_pattern);
|
|
|
|
cache->set(index + kArrayOffset, value_array);
|
2011-09-08 09:24:32 +00:00
|
|
|
} else {
|
|
|
|
uint32_t index2 =
|
2012-08-31 09:28:01 +00:00
|
|
|
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
|
2011-09-08 09:24:32 +00:00
|
|
|
if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
|
2012-08-31 09:28:01 +00:00
|
|
|
cache->set(index2 + kStringOffset, key_string);
|
|
|
|
cache->set(index2 + kPatternOffset, key_pattern);
|
|
|
|
cache->set(index2 + kArrayOffset, value_array);
|
2011-09-08 09:24:32 +00:00
|
|
|
} else {
|
|
|
|
cache->set(index2 + kStringOffset, Smi::FromInt(0));
|
|
|
|
cache->set(index2 + kPatternOffset, Smi::FromInt(0));
|
|
|
|
cache->set(index2 + kArrayOffset, Smi::FromInt(0));
|
2012-08-31 09:28:01 +00:00
|
|
|
cache->set(index + kStringOffset, key_string);
|
|
|
|
cache->set(index + kPatternOffset, key_pattern);
|
|
|
|
cache->set(index + kArrayOffset, value_array);
|
2011-09-08 09:24:32 +00:00
|
|
|
}
|
2011-09-07 11:28:48 +00:00
|
|
|
}
|
2012-08-31 09:28:01 +00:00
|
|
|
// If the array is a reasonably short list of substrings, convert it into a
|
2013-02-28 17:03:34 +00:00
|
|
|
// list of internalized strings.
|
2012-08-31 09:28:01 +00:00
|
|
|
if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
|
|
|
|
for (int i = 0; i < value_array->length(); i++) {
|
|
|
|
String* str = String::cast(value_array->get(i));
|
2013-02-28 17:03:34 +00:00
|
|
|
Object* internalized_str;
|
|
|
|
MaybeObject* maybe_string = heap->InternalizeString(str);
|
|
|
|
if (maybe_string->ToObject(&internalized_str)) {
|
|
|
|
value_array->set(i, internalized_str);
|
2011-09-07 11:28:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-08-31 09:28:01 +00:00
|
|
|
// Convert backing store to a copy-on-write array.
|
|
|
|
value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
|
2011-09-07 11:28:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-31 09:28:01 +00:00
|
|
|
void RegExpResultsCache::Clear(FixedArray* cache) {
  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
    cache->set(i, Smi::FromInt(0));
  }
}
|
|
|
|
|
|
|
|
|
2012-01-26 11:32:01 +00:00
|
|
|
MaybeObject* Heap::AllocateInitialNumberStringCache() {
  MaybeObject* maybe_obj =
      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
  return maybe_obj;
}
|
|
|
|
|
|
|
|
|
2012-01-26 11:32:01 +00:00
|
|
|
int Heap::FullSizeNumberStringCacheLength() {
  // Compute the size of the number string cache based on the max newspace size.
  // The number string cache has a minimum size based on twice the initial cache
  // size to ensure that it is bigger after being made 'full size'.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
                                 Min(0x4000, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the number
  // of entries.
  return number_string_cache_size * 2;
}
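
// Illustrative sketch (not part of the V8 sources): the same clamping applied
// to a concrete, assumed semispace size. With an 8 MB max semispace,
// 8 MB / 512 gives 16384 candidate entries, the Min(0x4000, ...) cap leaves it
// at 0x4000, and the key/value pairing doubles that into the array length.
static inline int ExampleFullNumberStringCacheLength(int max_semispace_bytes,
                                                     int initial_entries) {
  int entries = max_semispace_bytes / 512;
  if (entries > 0x4000) entries = 0x4000;        // same cap as Min(0x4000, ...)
  if (entries < initial_entries * 2) {           // same floor as Max(..., ...)
    entries = initial_entries * 2;
  }
  return entries * 2;  // one string and one number per entry
}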
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::AllocateFullSizeNumberStringCache() {
  // The idea is to have a small number string cache in the snapshot to keep
  // boot-time memory usage down. If the cache has already been expanded while
  // the snapshot is being created, that goal has been defeated.
  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
  MaybeObject* maybe_obj =
      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
  Object* new_cache;
  if (maybe_obj->ToObject(&new_cache)) {
    // We don't bother to repopulate the cache with entries from the old cache.
    // It will be repopulated soon enough with new strings.
    set_number_string_cache(FixedArray::cast(new_cache));
  }
  // If allocation fails then we just return without doing anything. It is only
  // a cache, so best effort is OK here.
}
|
|
|
|
|
|
|
|
|
2010-01-06 11:19:28 +00:00
|
|
|
void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}
|
|
|
|
|
|
|
|
|
2009-12-22 13:07:27 +00:00
|
|
|
static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}
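
// Illustrative sketch (not part of the V8 sources): the same hash written
// without the DoubleRepresentation helper, to make explicit that the two
// 32-bit halves of the IEEE-754 bit pattern are folded together with XOR.
// This assumes memcpy and uint64_t are available in this translation unit.
static inline int ExampleDoubleBitsHash(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));  // reinterpret the double's bit pattern
  return static_cast<int>(bits) ^ static_cast<int>(bits >> 32);
}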
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::SetNumberStringCache(Object* number, String* string) {
|
|
|
|
int hash;
|
2010-01-06 11:19:28 +00:00
|
|
|
int mask = (number_string_cache()->length() >> 1) - 1;
|
2008-07-03 15:10:15 +00:00
|
|
|
if (number->IsSmi()) {
|
2010-01-06 11:19:28 +00:00
|
|
|
hash = smi_get_hash(Smi::cast(number)) & mask;
|
2008-07-03 15:10:15 +00:00
|
|
|
} else {
|
2010-01-06 11:19:28 +00:00
|
|
|
hash = double_get_hash(number->Number()) & mask;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
2012-01-26 11:32:01 +00:00
|
|
|
if (number_string_cache()->get(hash * 2) != undefined_value() &&
|
|
|
|
number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
|
|
|
|
// The first time we have a hash collision, we move to the full sized
|
|
|
|
// number string cache.
|
|
|
|
AllocateFullSizeNumberStringCache();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
number_string_cache()->set(hash * 2, number);
|
2009-07-08 19:12:58 +00:00
|
|
|
number_string_cache()->set(hash * 2 + 1, string);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
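
// Illustrative sketch (not part of the V8 sources): the slot layout shared by
// GetNumberStringCache and SetNumberStringCache above. Half of the FixedArray
// slots hold keys and the other half hold the cached strings, interleaved as
// (key, value) pairs, so the key for a hash lives at slot 2 * (hash & mask)
// and its string at the following slot.
static inline int ExampleNumberStringKeySlot(int hash, int cache_length) {
  int mask = (cache_length >> 1) - 1;  // number of entries is length / 2
  return (hash & mask) * 2;            // value slot is this index + 1
}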
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::NumberToString(Object* number,
|
2013-05-03 10:36:16 +00:00
|
|
|
bool check_number_string_cache,
|
|
|
|
PretenureFlag pretenure) {
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->counters()->number_to_string_runtime()->Increment();
|
2010-04-07 11:13:05 +00:00
|
|
|
if (check_number_string_cache) {
|
|
|
|
Object* cached = GetNumberStringCache(number);
|
|
|
|
if (cached != undefined_value()) {
|
|
|
|
return cached;
|
|
|
|
}
|
2009-10-02 13:43:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
char arr[100];
|
|
|
|
Vector<char> buffer(arr, ARRAY_SIZE(arr));
|
|
|
|
const char* str;
|
|
|
|
if (number->IsSmi()) {
|
|
|
|
int num = Smi::cast(number)->value();
|
|
|
|
str = IntToCString(num, buffer);
|
|
|
|
} else {
|
|
|
|
double num = HeapNumber::cast(number)->value();
|
|
|
|
str = DoubleToCString(num, buffer);
|
|
|
|
}
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* js_string;
|
2013-05-03 10:36:16 +00:00
|
|
|
MaybeObject* maybe_js_string =
|
|
|
|
AllocateStringFromOneByte(CStrVector(str), pretenure);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (maybe_js_string->ToObject(&js_string)) {
|
|
|
|
SetNumberStringCache(number, String::cast(js_string));
|
2009-10-02 13:43:16 +00:00
|
|
|
}
|
2010-10-25 15:22:03 +00:00
|
|
|
return maybe_js_string;
|
2009-10-02 13:43:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-22 10:45:37 +00:00
|
|
|
MaybeObject* Heap::Uint32ToString(uint32_t value,
                                  bool check_number_string_cache) {
  Object* number;
  MaybeObject* maybe = NumberFromUint32(value);
  if (!maybe->To<Object>(&number)) return maybe;
  return NumberToString(number, check_number_string_cache);
}
|
|
|
|
|
|
|
|
|
2009-10-20 15:26:17 +00:00
|
|
|
Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}
|
|
|
|
|
|
|
|
|
|
|
|
Heap::RootListIndex Heap::RootIndexForExternalArrayType(
|
|
|
|
ExternalArrayType array_type) {
|
|
|
|
switch (array_type) {
|
|
|
|
case kExternalByteArray:
|
|
|
|
return kExternalByteArrayMapRootIndex;
|
|
|
|
case kExternalUnsignedByteArray:
|
|
|
|
return kExternalUnsignedByteArrayMapRootIndex;
|
|
|
|
case kExternalShortArray:
|
|
|
|
return kExternalShortArrayMapRootIndex;
|
|
|
|
case kExternalUnsignedShortArray:
|
|
|
|
return kExternalUnsignedShortArrayMapRootIndex;
|
|
|
|
case kExternalIntArray:
|
|
|
|
return kExternalIntArrayMapRootIndex;
|
|
|
|
case kExternalUnsignedIntArray:
|
|
|
|
return kExternalUnsignedIntArrayMapRootIndex;
|
|
|
|
case kExternalFloatArray:
|
|
|
|
return kExternalFloatArrayMapRootIndex;
|
2011-04-21 07:15:43 +00:00
|
|
|
case kExternalDoubleArray:
|
|
|
|
return kExternalDoubleArrayMapRootIndex;
|
2011-03-09 15:01:16 +00:00
|
|
|
case kExternalPixelArray:
|
|
|
|
return kExternalPixelArrayMapRootIndex;
|
2009-10-20 15:26:17 +00:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
return kUndefinedValueRootIndex;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-15 15:23:53 +00:00
|
|
|
Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
|
|
|
|
ElementsKind elementsKind) {
|
|
|
|
switch (elementsKind) {
|
|
|
|
case EXTERNAL_BYTE_ELEMENTS:
|
|
|
|
return kEmptyExternalByteArrayRootIndex;
|
|
|
|
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
|
|
|
return kEmptyExternalUnsignedByteArrayRootIndex;
|
|
|
|
case EXTERNAL_SHORT_ELEMENTS:
|
|
|
|
return kEmptyExternalShortArrayRootIndex;
|
|
|
|
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
|
|
|
return kEmptyExternalUnsignedShortArrayRootIndex;
|
|
|
|
case EXTERNAL_INT_ELEMENTS:
|
|
|
|
return kEmptyExternalIntArrayRootIndex;
|
|
|
|
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
|
|
|
return kEmptyExternalUnsignedIntArrayRootIndex;
|
|
|
|
case EXTERNAL_FLOAT_ELEMENTS:
|
|
|
|
return kEmptyExternalFloatArrayRootIndex;
|
|
|
|
case EXTERNAL_DOUBLE_ELEMENTS:
|
|
|
|
return kEmptyExternalDoubleArrayRootIndex;
|
|
|
|
case EXTERNAL_PIXEL_ELEMENTS:
|
|
|
|
return kEmptyExternalPixelArrayRootIndex;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
return kUndefinedValueRootIndex;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-05 09:52:11 +00:00
|
|
|
|
2013-05-15 15:23:53 +00:00
|
|
|
ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
  return ExternalArray::cast(
      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2009-10-20 15:26:17 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}
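
// Illustrative sketch (not part of the V8 sources): why the bit comparison is
// used above. IEEE-754 defines -0.0 == 0.0 as true, so an ordinary equality
// test cannot tell the two apart; comparing the raw 64-bit encodings can.
// This assumes memcpy and uint64_t are available in this translation unit.
static inline bool ExampleIsMinusZero(double value) {
  uint64_t value_bits;
  uint64_t minus_zero_bits;
  double minus_zero = -0.0;
  memcpy(&value_bits, &value, sizeof(value_bits));
  memcpy(&minus_zero_bits, &minus_zero, sizeof(minus_zero_bits));
  return value_bits == minus_zero_bits;  // true for -0.0 only, not for +0.0
}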
|
|
|
|
|
|
|
|
|
2011-05-19 11:47:34 +00:00
|
|
|
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
|
|
|
|
// Statically ensure that it is safe to allocate foreigns in paged spaces.
|
2012-02-23 12:11:24 +00:00
|
|
|
STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
|
2009-07-28 08:43:51 +00:00
|
|
|
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
|
2011-10-28 12:37:29 +00:00
|
|
|
Foreign* result;
|
|
|
|
MaybeObject* maybe_result = Allocate(foreign_map(), space);
|
|
|
|
if (!maybe_result->To(&result)) return maybe_result;
|
|
|
|
result->set_foreign_address(address);
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
|
2011-08-08 16:14:46 +00:00
|
|
|
SharedFunctionInfo* share;
|
|
|
|
MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
|
|
|
|
if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-08-08 16:14:46 +00:00
|
|
|
// Set pointer fields.
|
2008-07-03 15:10:15 +00:00
|
|
|
share->set_name(name);
|
2011-03-23 13:40:07 +00:00
|
|
|
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
|
2008-07-03 15:10:15 +00:00
|
|
|
share->set_code(illegal);
|
2013-05-13 11:10:31 +00:00
|
|
|
share->set_optimized_code_map(Smi::FromInt(0));
|
2013-02-27 14:45:59 +00:00
|
|
|
share->set_scope_info(ScopeInfo::Empty(isolate_));
|
2011-08-08 16:14:46 +00:00
|
|
|
Code* construct_stub =
|
|
|
|
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
|
2009-06-19 07:36:16 +00:00
|
|
|
share->set_construct_stub(construct_stub);
|
2013-02-28 17:03:34 +00:00
|
|
|
share->set_instance_class_name(Object_string());
|
2011-11-15 14:01:02 +00:00
|
|
|
share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
|
|
|
|
share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
|
|
|
|
share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
|
|
|
|
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
|
|
|
|
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
|
2012-03-26 13:08:08 +00:00
|
|
|
share->set_ast_node_count(0);
|
2012-06-11 16:57:27 +00:00
|
|
|
share->set_counters(0);
|
2011-08-08 16:14:46 +00:00
|
|
|
|
|
|
|
// Set integer fields (smi or int, depending on the architecture).
|
|
|
|
share->set_length(0);
|
|
|
|
share->set_formal_parameter_count(0);
|
|
|
|
share->set_expected_nof_properties(0);
|
2010-05-27 12:30:45 +00:00
|
|
|
share->set_num_literals(0);
|
2011-08-08 16:14:46 +00:00
|
|
|
share->set_start_position_and_type(0);
|
2010-05-27 12:30:45 +00:00
|
|
|
share->set_end_position(0);
|
|
|
|
share->set_function_token_position(0);
|
2011-08-08 16:14:46 +00:00
|
|
|
// All compiler hints default to false or 0.
|
|
|
|
share->set_compiler_hints(0);
|
|
|
|
share->set_opt_count(0);
|
|
|
|
|
|
|
|
return share;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-02 13:31:52 +00:00
|
|
|
MaybeObject* Heap::AllocateJSMessageObject(String* type,
|
|
|
|
JSArray* arguments,
|
|
|
|
int start_position,
|
|
|
|
int end_position,
|
|
|
|
Object* script,
|
|
|
|
Object* stack_trace,
|
|
|
|
Object* stack_frames) {
|
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
JSMessageObject* message = JSMessageObject::cast(result);
|
2011-11-15 14:01:02 +00:00
|
|
|
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
|
2012-05-23 14:24:29 +00:00
|
|
|
message->initialize_elements();
|
2011-11-15 14:01:02 +00:00
|
|
|
message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
|
2011-02-02 13:31:52 +00:00
|
|
|
message->set_type(type);
|
|
|
|
message->set_arguments(arguments);
|
|
|
|
message->set_start_position(start_position);
|
|
|
|
message->set_end_position(end_position);
|
|
|
|
message->set_script(script);
|
|
|
|
message->set_stack_trace(stack_trace);
|
|
|
|
message->set_stack_frames(stack_frames);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2009-11-20 10:11:45 +00:00
|
|
|
// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of unsigned wraparound.
  return character - from <= to - from;
}
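
// Illustrative sketch (not part of the V8 sources): the digit check used
// below, open-coded. If 'character' is smaller than '0', the unsigned
// subtraction wraps around to a huge value and the single comparison fails,
// so one compare covers both the lower and the upper bound.
static inline bool ExampleIsAsciiDigit(uint32_t character) {
  return character - '0' <= static_cast<uint32_t>('9' - '0');
}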
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
|
2011-03-18 20:35:07 +00:00
|
|
|
Heap* heap,
|
2012-12-19 13:27:20 +00:00
|
|
|
uint16_t c1,
|
|
|
|
uint16_t c2) {
|
2013-02-28 17:03:34 +00:00
|
|
|
String* result;
|
2009-11-20 10:11:45 +00:00
|
|
|
// Numeric strings have a different hash algorithm not known by
|
2013-02-28 17:03:34 +00:00
|
|
|
// LookupTwoCharsStringIfExists, so we skip this step for such strings.
|
2009-11-20 10:11:45 +00:00
|
|
|
if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
|
2013-02-28 17:03:34 +00:00
|
|
|
heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
|
|
|
|
return result;
|
2009-11-20 10:11:45 +00:00
|
|
|
// Now that we know the length is 2, we might as well make use of that fact
|
|
|
|
// when building the new string.
|
2013-01-09 10:30:54 +00:00
|
|
|
} else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
|
2012-12-19 13:57:51 +00:00
|
|
|
// We can do this.
|
2013-01-09 10:30:54 +00:00
|
|
|
ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2012-11-21 10:01:05 +00:00
|
|
|
{ MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2013-01-09 15:47:53 +00:00
|
|
|
uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
|
|
|
|
dest[0] = static_cast<uint8_t>(c1);
|
|
|
|
dest[1] = static_cast<uint8_t>(c2);
|
2009-11-20 10:11:45 +00:00
|
|
|
return result;
|
|
|
|
} else {
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2009-11-20 10:11:45 +00:00
|
|
|
uc16* dest = SeqTwoByteString::cast(result)->GetChars();
|
|
|
|
dest[0] = c1;
|
|
|
|
dest[1] = c2;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
}
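
// Illustrative sketch (not part of the V8 sources): the encoding decision in
// MakeOrFindTwoCharacterString above. OR-ing the two code units and comparing
// against the one-byte maximum works because that maximum is one less than a
// power of two, so any high bit set in either character survives the OR and
// fails the comparison. The limit below is an assumed stand-in for
// String::kMaxOneByteCharCodeU.
static inline bool ExampleFitsOneByteString(uint16_t c1, uint16_t c2) {
  const unsigned kExampleMaxOneByteCharCode = 0xFF;  // assumed Latin-1 ceiling
  return static_cast<unsigned>(c1 | c2) <= kExampleMaxOneByteCharCode;
}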
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateConsString(String* first, String* second) {
|
2009-03-17 09:33:06 +00:00
|
|
|
int first_length = first->length();
|
2009-11-10 13:23:05 +00:00
|
|
|
if (first_length == 0) {
|
|
|
|
return second;
|
|
|
|
}
|
2009-06-26 13:09:50 +00:00
|
|
|
|
2009-03-17 09:33:06 +00:00
|
|
|
int second_length = second->length();
|
2009-11-10 13:23:05 +00:00
|
|
|
if (second_length == 0) {
|
|
|
|
return first;
|
|
|
|
}
|
2009-06-26 13:09:50 +00:00
|
|
|
|
2008-10-22 09:09:07 +00:00
|
|
|
int length = first_length + second_length;
|
2009-11-20 10:11:45 +00:00
|
|
|
|
|
|
|
// Optimization for 2-byte strings often used as keys in a decompression
|
2013-02-28 17:03:34 +00:00
|
|
|
// dictionary. Check whether we already have the string in the string
|
2009-11-20 10:11:45 +00:00
|
|
|
// table to prevent creation of many unnecessary strings.
|
|
|
|
if (length == 2) {
|
2012-12-19 13:27:20 +00:00
|
|
|
uint16_t c1 = first->Get(0);
|
|
|
|
uint16_t c2 = second->Get(0);
|
2011-03-18 20:35:07 +00:00
|
|
|
return MakeOrFindTwoCharacterString(this, c1, c2);
|
2009-11-20 10:11:45 +00:00
|
|
|
}
|
|
|
|
|
2013-01-09 10:30:54 +00:00
|
|
|
bool first_is_one_byte = first->IsOneByteRepresentation();
|
|
|
|
bool second_is_one_byte = second->IsOneByteRepresentation();
|
|
|
|
bool is_one_byte = first_is_one_byte && second_is_one_byte;
|
2009-06-26 13:09:50 +00:00
|
|
|
// Make sure that an out of memory exception is thrown if the length
|
2009-10-08 12:36:12 +00:00
|
|
|
// of the new cons string is too large.
|
|
|
|
if (length > String::kMaxLength || length < 0) {
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate()->context()->mark_out_of_memory();
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0x4);
|
2009-06-26 13:09:50 +00:00
|
|
|
}
|
|
|
|
|
2013-04-26 11:34:44 +00:00
|
|
|
bool is_one_byte_data_in_two_byte_string = false;
|
2013-01-09 10:30:54 +00:00
|
|
|
if (!is_one_byte) {
|
2010-06-17 16:19:28 +00:00
|
|
|
// At least one of the strings uses two-byte representation so we
|
2012-01-16 12:38:59 +00:00
|
|
|
// can't use the fast case code for short ASCII strings below, but
|
|
|
|
// we can try to save memory if all chars actually fit in ASCII.
|
2013-04-26 11:34:44 +00:00
|
|
|
is_one_byte_data_in_two_byte_string =
|
|
|
|
first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
|
|
|
|
if (is_one_byte_data_in_two_byte_string) {
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
|
2010-06-17 16:19:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// If the resulting string is small make a flat string.
|
2012-01-17 14:29:17 +00:00
|
|
|
if (length < ConsString::kMinLength) {
|
2011-08-26 13:03:30 +00:00
|
|
|
// Note that neither of the two inputs can be a slice because:
|
2012-01-17 14:29:17 +00:00
|
|
|
STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
|
2009-03-17 09:33:06 +00:00
|
|
|
ASSERT(first->IsFlat());
|
|
|
|
ASSERT(second->IsFlat());
|
2013-01-09 10:30:54 +00:00
|
|
|
if (is_one_byte) {
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2012-11-21 10:01:05 +00:00
|
|
|
{ MaybeObject* maybe_result = AllocateRawOneByteString(length);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-10-22 09:09:07 +00:00
|
|
|
// Copy the characters into the new object.
|
2013-01-09 15:47:53 +00:00
|
|
|
uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
|
2009-06-26 13:09:50 +00:00
|
|
|
// Copy first part.
|
2013-01-09 15:47:53 +00:00
|
|
|
const uint8_t* src;
|
2009-12-02 12:58:10 +00:00
|
|
|
if (first->IsExternalString()) {
|
2011-11-17 17:05:12 +00:00
|
|
|
src = ExternalAsciiString::cast(first)->GetChars();
|
2009-12-02 12:58:10 +00:00
|
|
|
} else {
|
2012-11-15 13:31:27 +00:00
|
|
|
src = SeqOneByteString::cast(first)->GetChars();
|
2009-12-02 12:58:10 +00:00
|
|
|
}
|
2009-06-26 13:09:50 +00:00
|
|
|
for (int i = 0; i < first_length; i++) *dest++ = src[i];
|
|
|
|
// Copy second part.
|
2009-12-02 12:58:10 +00:00
|
|
|
if (second->IsExternalString()) {
|
2011-11-17 17:05:12 +00:00
|
|
|
src = ExternalAsciiString::cast(second)->GetChars();
|
2009-12-02 12:58:10 +00:00
|
|
|
} else {
|
2012-11-15 13:31:27 +00:00
|
|
|
src = SeqOneByteString::cast(second)->GetChars();
|
2009-12-02 12:58:10 +00:00
|
|
|
}
|
2009-06-26 13:09:50 +00:00
|
|
|
for (int i = 0; i < second_length; i++) *dest++ = src[i];
|
2008-10-22 09:09:07 +00:00
|
|
|
return result;
|
|
|
|
} else {
|
2013-04-26 11:34:44 +00:00
|
|
|
if (is_one_byte_data_in_two_byte_string) {
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2012-11-21 10:01:05 +00:00
|
|
|
{ MaybeObject* maybe_result = AllocateRawOneByteString(length);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2010-03-26 23:33:37 +00:00
|
|
|
// Copy the characters into the new object.
|
2013-01-09 15:47:53 +00:00
|
|
|
uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
|
2010-03-26 23:33:37 +00:00
|
|
|
String::WriteToFlat(first, dest, 0, first_length);
|
|
|
|
String::WriteToFlat(second, dest + first_length, 0, second_length);
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
|
2010-03-26 23:33:37 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = AllocateRawTwoByteString(length);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-10-22 09:09:07 +00:00
|
|
|
// Copy the characters into the new object.
|
|
|
|
uc16* dest = SeqTwoByteString::cast(result)->GetChars();
|
2009-03-17 09:33:06 +00:00
|
|
|
String::WriteToFlat(first, dest, 0, first_length);
|
|
|
|
String::WriteToFlat(second, dest + first_length, 0, second_length);
|
2008-10-22 09:09:07 +00:00
|
|
|
return result;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-26 11:34:44 +00:00
|
|
|
Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
|
2010-06-17 16:19:28 +00:00
|
|
|
cons_ascii_string_map() : cons_string_map();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2010-01-29 11:46:55 +00:00
|
|
|
|
2013-06-03 15:32:22 +00:00
|
|
|
DisallowHeapAllocation no_gc;
|
2008-07-03 15:10:15 +00:00
|
|
|
ConsString* cons_string = ConsString::cast(result);
|
2010-01-29 11:46:55 +00:00
|
|
|
WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
|
2009-11-24 14:10:06 +00:00
|
|
|
cons_string->set_length(length);
|
|
|
|
cons_string->set_hash_field(String::kEmptyHashField);
|
2009-11-11 15:25:51 +00:00
|
|
|
cons_string->set_first(first, mode);
|
|
|
|
cons_string->set_second(second, mode);
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
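
// Illustrative sketch (not part of the V8 sources): the shape decision made by
// AllocateConsString above, in isolation. Short results are copied eagerly
// into a flat sequential string, because a two-pointer cons cell would cost
// more memory and an extra indirection than the copy saves; longer results get
// a cons cell and are flattened lazily. The threshold below is an assumed
// stand-in for ConsString::kMinLength.
static inline bool ExampleShouldMakeConsString(int result_length) {
  const int kExampleConsMinLength = 13;  // assumed threshold
  return result_length >= kExampleConsMinLength;
}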
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateSubString(String* buffer,
|
2011-06-24 13:44:27 +00:00
|
|
|
int start,
|
|
|
|
int end,
|
|
|
|
PretenureFlag pretenure) {
|
2008-07-03 15:10:15 +00:00
|
|
|
int length = end - start;
|
2012-01-16 15:21:38 +00:00
|
|
|
if (length <= 0) {
|
2011-06-24 13:44:27 +00:00
|
|
|
return empty_string();
|
|
|
|
} else if (length == 1) {
|
2011-03-18 20:35:07 +00:00
|
|
|
return LookupSingleCharacterStringFromCode(buffer->Get(start));
|
2009-11-20 10:11:45 +00:00
|
|
|
} else if (length == 2) {
|
|
|
|
// Optimization for 2-byte strings often used as keys in a decompression
|
2013-02-28 17:03:34 +00:00
|
|
|
// dictionary. Check whether we already have the string in the string
|
|
|
|
// table to prevent creation of many unnecessary strings.
|
2012-12-19 13:27:20 +00:00
|
|
|
uint16_t c1 = buffer->Get(start);
|
|
|
|
uint16_t c2 = buffer->Get(start + 1);
|
2011-03-18 20:35:07 +00:00
|
|
|
return MakeOrFindTwoCharacterString(this, c1, c2);
|
2008-10-07 13:04:56 +00:00
|
|
|
}
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Make an attempt to flatten the buffer to reduce access time.
|
2010-05-20 09:01:39 +00:00
|
|
|
buffer = buffer->TryFlattenGetString();
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-08-26 13:03:30 +00:00
|
|
|
if (!FLAG_string_slices ||
|
2011-09-15 11:10:01 +00:00
|
|
|
!buffer->IsFlat() ||
|
2011-08-26 13:03:30 +00:00
|
|
|
length < SlicedString::kMinLength ||
|
|
|
|
pretenure == TENURED) {
|
|
|
|
Object* result;
|
2011-09-15 11:10:01 +00:00
|
|
|
// WriteToFlat takes care of the case when an indirect string has a
|
|
|
|
// different encoding from its underlying string. These encodings may
|
|
|
|
// differ because of externalization.
|
2013-01-09 10:30:54 +00:00
|
|
|
bool is_one_byte = buffer->IsOneByteRepresentation();
|
|
|
|
{ MaybeObject* maybe_result = is_one_byte
|
2012-11-21 10:01:05 +00:00
|
|
|
? AllocateRawOneByteString(length, pretenure)
|
2011-09-15 11:10:01 +00:00
|
|
|
: AllocateRawTwoByteString(length, pretenure);
|
2011-08-26 13:03:30 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
String* string_result = String::cast(result);
|
|
|
|
// Copy the characters into the new object.
|
2013-01-09 10:30:54 +00:00
|
|
|
if (is_one_byte) {
|
2012-11-21 10:01:05 +00:00
|
|
|
ASSERT(string_result->IsOneByteRepresentation());
|
2013-01-09 15:47:53 +00:00
|
|
|
uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
|
2011-08-26 13:03:30 +00:00
|
|
|
String::WriteToFlat(buffer, dest, start, end);
|
|
|
|
} else {
|
|
|
|
ASSERT(string_result->IsTwoByteRepresentation());
|
|
|
|
uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
|
|
|
|
String::WriteToFlat(buffer, dest, start, end);
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(buffer->IsFlat());
|
2012-10-12 11:41:14 +00:00
|
|
|
#ifdef VERIFY_HEAP
|
2011-10-25 13:27:46 +00:00
|
|
|
if (FLAG_verify_heap) {
|
|
|
|
buffer->StringVerify();
|
|
|
|
}
|
2011-08-26 13:03:30 +00:00
|
|
|
#endif
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-09-15 11:10:01 +00:00
|
|
|
// When slicing an indirect string we use its encoding for a newly created
|
|
|
|
// slice and don't check the encoding of the underlying string. This is safe
|
|
|
|
// even if the encodings are different because of externalization. If an
|
|
|
|
// indirect ASCII string is pointing to a two-byte string, the two-byte char
|
|
|
|
// codes of the underlying string must still fit into ASCII (because
|
|
|
|
// externalization must not change char codes).
|
2012-11-21 10:01:05 +00:00
|
|
|
{ Map* map = buffer->IsOneByteRepresentation()
|
2011-08-26 13:03:30 +00:00
|
|
|
? sliced_ascii_string_map()
|
|
|
|
: sliced_string_map();
|
|
|
|
MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2011-08-26 13:03:30 +00:00
|
|
|
|
2013-06-03 15:32:22 +00:00
|
|
|
DisallowHeapAllocation no_gc;
|
2011-08-26 13:03:30 +00:00
|
|
|
SlicedString* sliced_string = SlicedString::cast(result);
|
|
|
|
sliced_string->set_length(length);
|
|
|
|
sliced_string->set_hash_field(String::kEmptyHashField);
|
|
|
|
if (buffer->IsConsString()) {
|
|
|
|
ConsString* cons = ConsString::cast(buffer);
|
|
|
|
ASSERT(cons->second()->length() == 0);
|
|
|
|
sliced_string->set_parent(cons->first());
|
|
|
|
sliced_string->set_offset(start);
|
|
|
|
} else if (buffer->IsSlicedString()) {
|
|
|
|
// Prevent nesting sliced strings.
|
|
|
|
SlicedString* parent_slice = SlicedString::cast(buffer);
|
|
|
|
sliced_string->set_parent(parent_slice->parent());
|
|
|
|
sliced_string->set_offset(start + parent_slice->offset());
|
2009-11-10 13:23:05 +00:00
|
|
|
} else {
|
2011-08-26 13:03:30 +00:00
|
|
|
sliced_string->set_parent(buffer);
|
|
|
|
sliced_string->set_offset(start);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
2011-09-15 11:10:01 +00:00
|
|
|
ASSERT(sliced_string->parent()->IsSeqString() ||
|
|
|
|
sliced_string->parent()->IsExternalString());
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
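
// Illustrative sketch (not part of the V8 sources): how the code above keeps
// slices one level deep. Slicing an existing slice re-targets the new slice at
// the original parent and folds the offsets together, so repeated substring
// operations never build up a chain of indirections. The struct is a
// hypothetical stand-in for SlicedString.
struct ExampleSlice {
  const ExampleSlice* parent;  // NULL when this represents a flat string
  int offset;
  int length;
};

static inline ExampleSlice ExampleMakeSlice(const ExampleSlice* buffer,
                                            int start, int length) {
  ExampleSlice result;
  if (buffer->parent != NULL) {
    // Slicing a slice: point at the flat parent and accumulate the offset.
    result.parent = buffer->parent;
    result.offset = buffer->offset + start;
  } else {
    result.parent = buffer;
    result.offset = start;
  }
  result.length = length;
  return result;
}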
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateExternalStringFromAscii(
|
2011-09-21 13:28:09 +00:00
|
|
|
const ExternalAsciiString::Resource* resource) {
|
2009-11-11 09:50:06 +00:00
|
|
|
size_t length = resource->length();
|
2009-11-24 14:10:06 +00:00
|
|
|
if (length > static_cast<size_t>(String::kMaxLength)) {
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate()->context()->mark_out_of_memory();
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0x5);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
2009-11-24 14:10:06 +00:00
|
|
|
Map* map = external_ascii_string_map();
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
|
2009-11-11 09:50:06 +00:00
|
|
|
external_string->set_length(static_cast<int>(length));
|
2009-11-24 14:10:06 +00:00
|
|
|
external_string->set_hash_field(String::kEmptyHashField);
|
2008-07-03 15:10:15 +00:00
|
|
|
external_string->set_resource(resource);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateExternalStringFromTwoByte(
|
2011-09-21 13:28:09 +00:00
|
|
|
const ExternalTwoByteString::Resource* resource) {
|
2009-11-11 09:50:06 +00:00
|
|
|
size_t length = resource->length();
|
|
|
|
if (length > static_cast<size_t>(String::kMaxLength)) {
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate()->context()->mark_out_of_memory();
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0x6);
|
2009-11-11 09:50:06 +00:00
|
|
|
}
|
2009-11-24 14:10:06 +00:00
|
|
|
|
2010-06-17 16:19:28 +00:00
|
|
|
// For small strings we check whether the resource contains only
|
2013-01-09 10:30:54 +00:00
|
|
|
// one byte characters. If yes, we use a different string map.
|
2013-04-26 11:34:44 +00:00
|
|
|
static const size_t kOneByteCheckLengthLimit = 32;
|
|
|
|
bool is_one_byte = length <= kOneByteCheckLengthLimit &&
|
2013-01-09 10:30:54 +00:00
|
|
|
String::IsOneByte(resource->data(), static_cast<int>(length));
|
|
|
|
Map* map = is_one_byte ?
|
2013-04-26 11:34:44 +00:00
|
|
|
external_string_with_one_byte_data_map() : external_string_map();
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
|
2009-11-11 09:50:06 +00:00
|
|
|
external_string->set_length(static_cast<int>(length));
|
2009-11-24 14:10:06 +00:00
|
|
|
external_string->set_hash_field(String::kEmptyHashField);
|
2008-07-03 15:10:15 +00:00
|
|
|
external_string->set_resource(resource);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
|
2013-01-09 10:30:54 +00:00
|
|
|
if (code <= String::kMaxOneByteCharCode) {
|
2011-03-18 20:35:07 +00:00
|
|
|
Object* value = single_character_string_cache()->get(code);
|
|
|
|
if (value != undefined_value()) return value;
|
2008-10-21 16:28:44 +00:00
|
|
|
|
2013-01-09 10:30:54 +00:00
|
|
|
uint8_t buffer[1];
|
|
|
|
buffer[0] = static_cast<uint8_t>(code);
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2012-12-17 15:56:16 +00:00
|
|
|
MaybeObject* maybe_result =
|
2013-02-28 17:03:34 +00:00
|
|
|
InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
|
2008-10-21 16:28:44 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
2011-03-18 20:35:07 +00:00
|
|
|
single_character_string_cache()->set(code, result);
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
|
2008-10-21 16:28:44 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = AllocateRawTwoByteString(1);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-11-03 10:16:05 +00:00
|
|
|
String* answer = String::cast(result);
|
2009-03-17 09:33:06 +00:00
|
|
|
answer->Set(0, code);
|
2008-11-03 10:16:05 +00:00
|
|
|
return answer;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
|
2010-01-07 13:17:18 +00:00
|
|
|
if (length < 0 || length > ByteArray::kMaxLength) {
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0x7);
|
2010-01-07 13:17:18 +00:00
|
|
|
}
|
2008-11-25 11:07:48 +00:00
|
|
|
if (pretenure == NOT_TENURED) {
|
|
|
|
return AllocateByteArray(length);
|
|
|
|
}
|
|
|
|
int size = ByteArray::SizeFor(length);
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2012-02-23 12:11:24 +00:00
|
|
|
{ MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
|
2010-10-25 15:22:03 +00:00
|
|
|
? old_data_space_->AllocateRaw(size)
|
2011-09-19 18:36:47 +00:00
|
|
|
: lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-11-25 11:07:48 +00:00
|
|
|
|
2011-12-07 08:43:18 +00:00
|
|
|
reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
|
|
|
|
byte_array_map());
|
2010-05-27 12:30:45 +00:00
|
|
|
reinterpret_cast<ByteArray*>(result)->set_length(length);
|
2008-11-25 11:07:48 +00:00
|
|
|
return result;
|
|
|
|
}
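
// Illustrative sketch (not part of the V8 sources): the space choice made
// above. A pretenured byte array that still fits on a regular page goes to the
// old data space; anything larger must be placed in the large object space.
// The page limit below is an assumed stand-in for
// Page::kMaxNonCodeHeapObjectSize.
static inline bool ExampleNeedsLargeObjectSpace(int object_size) {
  const int kExampleMaxRegularObjectSize = 16 * 1024;  // assumed page limit
  return object_size > kExampleMaxRegularObjectSize;
}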
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateByteArray(int length) {
|
2010-01-07 13:17:18 +00:00
|
|
|
if (length < 0 || length > ByteArray::kMaxLength) {
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0x8);
|
2010-01-07 13:17:18 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
int size = ByteArray::SizeFor(length);
|
2008-09-05 12:34:09 +00:00
|
|
|
AllocationSpace space =
|
2012-02-23 12:11:24 +00:00
|
|
|
(size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-12-07 08:43:18 +00:00
|
|
|
reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
|
|
|
|
byte_array_map());
|
2010-05-27 12:30:45 +00:00
|
|
|
reinterpret_cast<ByteArray*>(result)->set_length(length);
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-02-11 23:52:52 +00:00
|
|
|
void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map_no_write_barrier(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map_no_write_barrier(two_pointer_filler_map());
  } else {
    filler->set_map_no_write_barrier(free_space_map());
    FreeSpace::cast(filler)->set_size(size);
  }
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateExternalArray(int length,
|
|
|
|
ExternalArrayType array_type,
|
|
|
|
void* external_pointer,
|
|
|
|
PretenureFlag pretenure) {
|
2009-10-20 15:26:17 +00:00
|
|
|
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
|
|
|
|
space,
|
|
|
|
OLD_DATA_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2009-10-20 15:26:17 +00:00
|
|
|
|
2011-12-07 08:43:18 +00:00
|
|
|
reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
|
2009-10-20 15:26:17 +00:00
|
|
|
MapForExternalArrayType(array_type));
|
|
|
|
reinterpret_cast<ExternalArray*>(result)->set_length(length);
|
|
|
|
reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
|
|
|
|
external_pointer);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::CreateCode(const CodeDesc& desc,
|
|
|
|
Code::Flags flags,
|
2011-03-09 10:38:19 +00:00
|
|
|
Handle<Object> self_reference,
|
2013-04-18 09:50:46 +00:00
|
|
|
bool immovable,
|
|
|
|
bool crankshafted) {
|
2010-07-06 13:48:51 +00:00
|
|
|
// Allocate ByteArray before the Code object, so that we do not risk
|
|
|
|
// leaving uninitialized Code object (and breaking the heap).
|
2011-11-11 13:48:14 +00:00
|
|
|
ByteArray* reloc_info;
|
|
|
|
MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
|
|
|
|
if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
|
2010-07-06 13:48:51 +00:00
|
|
|
|
2011-03-09 10:38:19 +00:00
|
|
|
// Compute size.
|
2010-07-05 11:45:11 +00:00
|
|
|
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
|
2010-07-13 13:06:33 +00:00
|
|
|
int obj_size = Code::SizeFor(body_size);
|
2010-09-24 21:48:44 +00:00
|
|
|
ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* maybe_result;
|
2011-03-09 10:38:19 +00:00
|
|
|
// Large code objects and code objects which should stay at a fixed address
|
|
|
|
// are allocated in large object space.
|
2012-09-14 11:16:56 +00:00
|
|
|
HeapObject* result;
|
|
|
|
bool force_lo_space = obj_size > code_space()->AreaSize();
|
|
|
|
if (force_lo_space) {
|
2011-09-19 18:36:47 +00:00
|
|
|
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
|
2008-09-05 12:34:09 +00:00
|
|
|
} else {
|
2010-10-25 15:22:03 +00:00
|
|
|
maybe_result = code_space_->AllocateRaw(obj_size);
|
2008-09-05 12:34:09 +00:00
|
|
|
}
|
2012-09-14 11:16:56 +00:00
|
|
|
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2012-09-14 11:16:56 +00:00
|
|
|
if (immovable && !force_lo_space &&
|
|
|
|
// Objects on the first page of each space are never moved.
|
|
|
|
!code_space_->FirstPage()->Contains(result->address())) {
|
|
|
|
// Discard the first code allocation, which was on a page where it could be
|
|
|
|
// moved.
|
|
|
|
CreateFillerObjectAt(result->address(), obj_size);
|
|
|
|
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
|
|
|
|
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Initialize the object
|
2012-09-14 11:16:56 +00:00
|
|
|
result->set_map_no_write_barrier(code_map());
|
2008-07-03 15:10:15 +00:00
|
|
|
Code* code = Code::cast(result);
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(!isolate_->code_range()->exists() ||
|
|
|
|
isolate_->code_range()->contains(code->address()));
|
2008-07-03 15:10:15 +00:00
|
|
|
code->set_instruction_size(desc.instr_size);
|
2011-11-11 13:48:14 +00:00
|
|
|
code->set_relocation_info(reloc_info);
|
2008-07-03 15:10:15 +00:00
|
|
|
code->set_flags(flags);
|
2011-01-13 14:16:08 +00:00
|
|
|
if (code->is_call_stub() || code->is_keyed_call_stub()) {
|
|
|
|
code->set_check_type(RECEIVER_MAP_CHECK);
|
|
|
|
}
|
2013-04-18 09:50:46 +00:00
|
|
|
code->set_is_crankshafted(crankshafted);
|
2011-11-15 14:01:02 +00:00
|
|
|
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
|
2012-11-14 15:59:45 +00:00
|
|
|
code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
|
2011-11-15 14:01:02 +00:00
|
|
|
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
|
2012-01-25 15:11:59 +00:00
|
|
|
code->set_gc_metadata(Smi::FromInt(0));
|
2012-03-23 13:33:11 +00:00
|
|
|
code->set_ic_age(global_ic_age_);
|
2012-11-29 07:38:00 +00:00
|
|
|
code->set_prologue_offset(kPrologueOffsetNotSet);
|
2013-01-24 11:55:05 +00:00
|
|
|
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
|
|
|
|
code->set_marked_for_deoptimization(false);
|
|
|
|
}
|
2009-02-25 16:52:15 +00:00
|
|
|
// Allow self references to created code object by patching the handle to
|
|
|
|
// point to the newly allocated Code object.
|
|
|
|
if (!self_reference.is_null()) {
|
|
|
|
*(self_reference.location()) = code;
|
2008-11-25 11:07:48 +00:00
|
|
|
}
|
|
|
|
// Migrate generated code.
|
|
|
|
// The generated code can contain Object** values (typically from handles)
|
|
|
|
// that are dereferenced during the copy to point directly to the actual heap
|
|
|
|
// objects. These pointers can include references to the code object itself,
|
|
|
|
// through the self_reference parameter.
|
|
|
|
code->CopyFrom(desc);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2012-10-12 11:41:14 +00:00
|
|
|
#ifdef VERIFY_HEAP
|
2011-10-25 13:27:46 +00:00
|
|
|
if (FLAG_verify_heap) {
|
|
|
|
code->Verify();
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
#endif
|
|
|
|
return code;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::CopyCode(Code* code) {
|
2008-07-03 15:10:15 +00:00
|
|
|
// Allocate an object the same size as the code object.
|
|
|
|
int obj_size = code->Size();
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* maybe_result;
|
2012-02-23 12:11:24 +00:00
|
|
|
if (obj_size > code_space()->AreaSize()) {
|
2011-09-19 18:36:47 +00:00
|
|
|
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
|
2008-09-05 12:34:09 +00:00
|
|
|
} else {
|
2010-10-25 15:22:03 +00:00
|
|
|
maybe_result = code_space_->AllocateRaw(obj_size);
|
2008-09-05 12:34:09 +00:00
|
|
|
}
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Copy code object.
|
|
|
|
Address old_addr = code->address();
|
|
|
|
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
|
2010-05-27 12:30:45 +00:00
|
|
|
CopyBlock(new_addr, old_addr, obj_size);
|
2008-07-03 15:10:15 +00:00
|
|
|
// Relocate the copy.
|
|
|
|
Code* new_code = Code::cast(result);
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(!isolate_->code_range()->exists() ||
|
|
|
|
isolate_->code_range()->contains(code->address()));
|
2008-07-03 15:10:15 +00:00
|
|
|
new_code->Relocate(new_addr - old_addr);
|
|
|
|
return new_code;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
|
2010-07-06 13:48:51 +00:00
|
|
|
// Allocate ByteArray before the Code object, so that we do not risk
|
|
|
|
// leaving uninitialized Code object (and breaking the heap).
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* reloc_info_array;
|
|
|
|
{ MaybeObject* maybe_reloc_info_array =
|
|
|
|
AllocateByteArray(reloc_info.length(), TENURED);
|
|
|
|
if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
|
|
|
|
return maybe_reloc_info_array;
|
|
|
|
}
|
|
|
|
}
|
2010-07-06 13:48:51 +00:00
|
|
|
|
2010-07-05 11:45:11 +00:00
|
|
|
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
|
2010-03-15 21:06:51 +00:00
|
|
|
|
2010-07-13 13:06:33 +00:00
|
|
|
int new_obj_size = Code::SizeFor(new_body_size);
|
2010-03-15 21:06:51 +00:00
|
|
|
|
|
|
|
Address old_addr = code->address();
|
|
|
|
|
2010-04-13 11:59:37 +00:00
|
|
|
size_t relocation_offset =
|
2010-07-05 11:45:11 +00:00
|
|
|
static_cast<size_t>(code->instruction_end() - old_addr);
|
2010-03-15 21:06:51 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* maybe_result;
|
2012-02-23 12:11:24 +00:00
|
|
|
if (new_obj_size > code_space()->AreaSize()) {
|
2011-09-19 18:36:47 +00:00
|
|
|
maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
|
2010-03-15 21:06:51 +00:00
|
|
|
} else {
|
2010-10-25 15:22:03 +00:00
|
|
|
maybe_result = code_space_->AllocateRaw(new_obj_size);
|
2010-03-15 21:06:51 +00:00
|
|
|
}
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
2010-03-15 21:06:51 +00:00
|
|
|
|
|
|
|
// Copy code object.
|
|
|
|
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
|
|
|
|
|
|
|
|
// Copy header and instructions.
|
2013-04-02 13:29:26 +00:00
|
|
|
CopyBytes(new_addr, old_addr, relocation_offset);
|
2010-03-15 21:06:51 +00:00
|
|
|
|
|
|
|
Code* new_code = Code::cast(result);
|
2010-07-05 11:45:11 +00:00
|
|
|
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
|
2010-03-15 21:06:51 +00:00
|
|
|
|
2010-07-05 11:45:11 +00:00
|
|
|
// Copy patched rinfo.
|
2013-03-21 10:28:03 +00:00
|
|
|
CopyBytes(new_code->relocation_start(),
|
|
|
|
reloc_info.start(),
|
2013-04-02 13:29:26 +00:00
|
|
|
static_cast<size_t>(reloc_info.length()));
|
2010-03-15 21:06:51 +00:00
|
|
|
|
|
|
|
// Relocate the copy.
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(!isolate_->code_range()->exists() ||
|
|
|
|
isolate_->code_range()->contains(code->address()));
|
2010-03-15 21:06:51 +00:00
|
|
|
new_code->Relocate(new_addr - old_addr);
|
|
|
|
|
2012-10-12 11:41:14 +00:00
|
|
|
#ifdef VERIFY_HEAP
|
2011-10-25 13:27:46 +00:00
|
|
|
if (FLAG_verify_heap) {
|
|
|
|
code->Verify();
|
|
|
|
}
|
2010-03-15 21:06:51 +00:00
|
|
|
#endif
|
|
|
|
return new_code;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-01 16:06:34 +00:00
|
|
|
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
|
2013-07-08 10:02:16 +00:00
|
|
|
Handle<AllocationSite> allocation_site) {
|
2013-03-01 16:06:34 +00:00
|
|
|
ASSERT(gc_state_ == NOT_IN_GC);
|
|
|
|
ASSERT(map->instance_type() != MAP_TYPE);
|
|
|
|
// If allocation failures are disallowed, we may allocate in a different
|
|
|
|
// space when new space is full and the object is not a large object.
|
|
|
|
AllocationSpace retry_space =
|
|
|
|
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
|
2013-07-19 13:30:49 +00:00
|
|
|
int size = map->instance_size() + AllocationMemento::kSize;
|
2013-03-01 16:06:34 +00:00
|
|
|
Object* result;
|
|
|
|
MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
// No need for write barrier since object is white and map is in old space.
|
|
|
|
HeapObject::cast(result)->set_map_no_write_barrier(map);
|
2013-07-19 13:30:49 +00:00
|
|
|
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
|
2013-03-01 16:06:34 +00:00
|
|
|
reinterpret_cast<Address>(result) + map->instance_size());
|
2013-07-19 13:30:49 +00:00
|
|
|
alloc_memento->set_map_no_write_barrier(allocation_memento_map());
|
|
|
|
alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
|
2013-03-01 16:06:34 +00:00
|
|
|
return result;
|
|
|
|
}
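
// Illustrative sketch (not part of the V8 sources): the layout produced by
// AllocateWithAllocationSite above. A single raw allocation covers the object
// plus its AllocationMemento; the object occupies the front, and the memento
// is written immediately behind it, at an offset equal to the instance size.
// The memento size below is an assumption for the example.
static inline int ExampleMementoOffset(int instance_size) {
  return instance_size;  // the memento starts where the object's fields end
}

static inline int ExampleTotalAllocationSize(int instance_size) {
  const int kExampleAllocationMementoSize = 2 * kPointerSize;  // assumed size
  return instance_size + kExampleAllocationMementoSize;
}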
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
|
2008-07-03 15:10:15 +00:00
|
|
|
ASSERT(gc_state_ == NOT_IN_GC);
|
|
|
|
ASSERT(map->instance_type() != MAP_TYPE);
|
2009-12-22 13:34:02 +00:00
|
|
|
// If allocation failures are disallowed, we may allocate in a different
|
|
|
|
// space when new space is full and the object is not a large object.
|
|
|
|
AllocationSpace retry_space =
|
|
|
|
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
|
2013-03-01 16:06:34 +00:00
|
|
|
int size = map->instance_size();
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2013-03-01 16:06:34 +00:00
|
|
|
MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
2011-11-15 14:01:02 +00:00
|
|
|
// No need for write barrier since object is white and map is in old space.
|
2011-12-07 08:43:18 +00:00
|
|
|
HeapObject::cast(result)->set_map_no_write_barrier(map);
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-10-05 09:41:54 +00:00
|
|
|
void Heap::InitializeFunction(JSFunction* function,
|
|
|
|
SharedFunctionInfo* shared,
|
|
|
|
Object* prototype) {
|
2008-07-03 15:10:15 +00:00
|
|
|
ASSERT(!prototype->IsMap());
|
|
|
|
function->initialize_properties();
|
|
|
|
function->initialize_elements();
|
|
|
|
function->set_shared(shared);
|
2010-08-11 08:12:53 +00:00
|
|
|
function->set_code(shared->code());
|
2008-07-03 15:10:15 +00:00
|
|
|
function->set_prototype_or_initial_map(prototype);
|
|
|
|
function->set_context(undefined_value());
|
2011-10-17 12:44:16 +00:00
|
|
|
function->set_literals_or_bindings(empty_fixed_array());
|
2010-12-07 11:31:57 +00:00
|
|
|
function->set_next_function_link(undefined_value());
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
|
2013-04-11 16:28:19 +00:00
|
|
|
// Make sure to use globals from the function's context, since the function
|
|
|
|
// can be from a different context.
|
|
|
|
Context* native_context = function->context()->native_context();
|
2011-09-22 12:57:54 +00:00
|
|
|
Map* new_map;
|
2013-04-11 16:28:19 +00:00
|
|
|
if (function->shared()->is_generator()) {
|
|
|
|
// Generator prototypes can share maps since they don't have "constructor"
|
|
|
|
// properties.
|
|
|
|
new_map = native_context->generator_object_prototype_map();
|
|
|
|
} else {
|
|
|
|
// Each function prototype gets a fresh map to avoid unwanted sharing of
|
|
|
|
// maps between prototypes of different constructors.
|
|
|
|
JSFunction* object_function = native_context->object_function();
|
|
|
|
ASSERT(object_function->has_initial_map());
|
|
|
|
MaybeObject* maybe_map = object_function->initial_map()->Copy();
|
|
|
|
if (!maybe_map->To(&new_map)) return maybe_map;
|
|
|
|
}
|
2012-07-17 13:50:19 +00:00
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* prototype;
|
2012-07-17 13:50:19 +00:00
|
|
|
MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
|
|
|
|
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
|
|
|
|
|
2013-05-17 13:54:12 +00:00
|
|
|
if (!function->shared()->is_generator()) {
|
2013-04-11 16:28:19 +00:00
|
|
|
MaybeObject* maybe_failure =
|
|
|
|
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
|
|
|
|
constructor_string(), function, DONT_ENUM);
|
|
|
|
if (maybe_failure->IsFailure()) return maybe_failure;
|
|
|
|
}
|
2012-07-17 13:50:19 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
return prototype;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateFunction(Map* function_map,
|
|
|
|
SharedFunctionInfo* shared,
|
|
|
|
Object* prototype,
|
|
|
|
PretenureFlag pretenure) {
|
2009-12-16 15:43:20 +00:00
|
|
|
AllocationSpace space =
|
|
|
|
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = Allocate(function_map, space);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2011-10-05 09:41:54 +00:00
|
|
|
InitializeFunction(JSFunction::cast(result), shared, prototype);
|
|
|
|
return result;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
|
2008-08-22 13:33:59 +00:00
|
|
|
// To get fast allocation and map sharing for arguments objects we
|
|
|
|
// allocate them based on an arguments boilerplate.
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-03-17 20:28:41 +00:00
|
|
|
JSObject* boilerplate;
|
|
|
|
int arguments_object_size;
|
|
|
|
bool strict_mode_callee = callee->IsJSFunction() &&
|
2011-11-24 15:17:04 +00:00
|
|
|
!JSFunction::cast(callee)->shared()->is_classic_mode();
|
2011-03-17 20:28:41 +00:00
|
|
|
if (strict_mode_callee) {
|
|
|
|
boilerplate =
|
2012-08-17 09:03:08 +00:00
|
|
|
isolate()->context()->native_context()->
|
2011-03-18 20:35:07 +00:00
|
|
|
strict_mode_arguments_boilerplate();
|
2011-03-17 20:28:41 +00:00
|
|
|
arguments_object_size = kArgumentsObjectSizeStrict;
|
|
|
|
} else {
|
2011-03-18 20:35:07 +00:00
|
|
|
boilerplate =
|
2012-08-17 09:03:08 +00:00
|
|
|
isolate()->context()->native_context()->arguments_boilerplate();
|
2011-03-17 20:28:41 +00:00
|
|
|
arguments_object_size = kArgumentsObjectSize;
|
|
|
|
}
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// This calls Copy directly rather than using Heap::AllocateRaw so we
|
|
|
|
// duplicate the check here.
|
2013-06-03 15:32:22 +00:00
|
|
|
ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-12-17 15:35:15 +00:00
|
|
|
// Check that the size of the boilerplate matches our
|
|
|
|
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
|
|
|
|
// on the size being a known constant.
|
2011-03-17 20:28:41 +00:00
|
|
|
ASSERT(arguments_object_size == boilerplate->map()->instance_size());
|
2009-12-17 15:35:15 +00:00
|
|
|
|
|
|
|
// Do the allocation.
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result =
|
2011-03-17 20:28:41 +00:00
|
|
|
AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-10-23 08:46:32 +00:00
|
|
|
|
2008-10-30 09:15:58 +00:00
|
|
|
// Copy the content. The arguments boilerplate doesn't have any
|
|
|
|
// fields that point to new space so it's safe to skip the write
|
|
|
|
// barrier here.
|
2010-05-27 12:30:45 +00:00
|
|
|
CopyBlock(HeapObject::cast(result)->address(),
|
|
|
|
boilerplate->address(),
|
2011-03-17 20:28:41 +00:00
|
|
|
JSObject::kHeaderSize);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-03-17 20:28:41 +00:00
|
|
|
// Set the length property.
|
|
|
|
JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
|
2008-10-23 08:46:32 +00:00
|
|
|
Smi::FromInt(length),
|
|
|
|
SKIP_WRITE_BARRIER);
|
2011-03-17 20:28:41 +00:00
|
|
|
// Set the callee property for non-strict mode arguments object only.
|
|
|
|
if (!strict_mode_callee) {
|
|
|
|
JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
|
|
|
|
callee);
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// Check the state of the object
|
|
|
|
ASSERT(JSObject::cast(result)->HasFastProperties());
|
2012-05-23 14:24:29 +00:00
|
|
|
ASSERT(JSObject::cast(result)->HasFastObjectElements());
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  InstanceType instance_type;
  int instance_size;
  int in_object_properties;
  if (fun->shared()->is_generator()) {
    instance_type = JS_GENERATOR_OBJECT_TYPE;
    instance_size = JSGeneratorObject::kSize;
    in_object_properties = 0;
  } else {
    instance_type = JS_OBJECT_TYPE;
    instance_size = fun->shared()->CalculateInstanceSize();
    in_object_properties = fun->shared()->CalculateInObjectProperties();
  }
  Map* map;
  MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
  if (!maybe_map->To(&map)) return maybe_map;

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
    if (!maybe_prototype->To(&prototype)) return maybe_prototype;
  }
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_object_elements());

  if (!fun->shared()->is_generator()) {
    fun->shared()->StartInobjectSlackTracking(map);
  }

  return map;
}

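// Note: AllocateInitialMap sizes the map from the function's shared info.
// Ordinary constructors get their instance size and in-object property count
// from CalculateInstanceSize() and CalculateInObjectProperties(); generator
// functions always get a fixed JSGeneratorObject::kSize with no in-object
// properties, and in-object slack tracking is only started for the
// non-generator case.
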
void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map.  For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
  // verification code has to cope with (temporarily) invalid objects.  See,
  // for example, JSArray::JSArrayVerify.
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  // Pre-allocated fields need to be initialized with undefined_value as well
  // so that object accesses before the constructor completes (e.g. in the
  // debugger) will not cause a crash.
  if (map->constructor()->IsJSFunction() &&
      JSFunction::cast(map->constructor())->shared()->
          IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    ASSERT(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map, Heap::undefined_value(), filler);
}

MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size = map->InitialPropertiesLength();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
  Object* obj;
  MaybeObject* maybe_obj = Allocate(map, space);
  if (!maybe_obj->To(&obj)) return maybe_obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements() ||
         JSObject::cast(obj)->HasExternalArrayElements());
  return obj;
}

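// Note: the space choice above follows a pattern that recurs in this file.
// A hedged summary of the decision:
//
//   space = (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
//   if (instance_size > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
//
// i.e. tenured objects go straight to old pointer space, and anything too
// large for a regular page is redirected to the large object space.
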
MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
    Handle<AllocationSite> allocation_site) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size = map->InitialPropertiesLength();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space = NEW_SPACE;
  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
  Object* obj;
  MaybeObject* maybe_obj =
      AllocateWithAllocationSite(map, space, allocation_site);
  if (!maybe_obj->To(&obj)) return maybe_obj;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}

MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
                                    PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  MaybeObject* result = AllocateJSObjectFromMap(
      constructor->initial_map(), pretenure);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}

MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
    Handle<AllocationSite> allocation_site) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map, or the
  // payload advice.
  Map* initial_map = constructor->initial_map();

  Smi* smi = Smi::cast(allocation_site->transition_info());
  ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
  AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
  if (to_kind != initial_map->elements_kind()) {
    MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
    if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
    // Possibly alter the mode, since we found an updated elements kind
    // in the type info cell.
    mode = AllocationSite::GetMode(to_kind);
  }

  MaybeObject* result;
  if (mode == TRACK_ALLOCATION_SITE) {
    result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
                                                       allocation_site);
  } else {
    result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
  }
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}

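// Note: the transition_info() of the AllocationSite is read above as a Smi
// encoding the ElementsKind observed at the site. When it differs from the
// constructor's initial map, the map is transitioned with AsElementsKind()
// before allocation, and AllocationSite::GetMode() decides whether the site
// still needs to be tracked for that elements kind.
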
MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction* function) {
  ASSERT(function->shared()->is_generator());
  Map* map;
  if (function->has_initial_map()) {
    map = function->initial_map();
  } else {
    // Allocate the initial map if absent.
    MaybeObject* maybe_map = AllocateInitialMap(function);
    if (!maybe_map->To(&map)) return maybe_map;
    function->set_initial_map(map);
    map->set_constructor(function);
  }
  ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
  return AllocateJSObjectFromMap(map);
}

MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
  // Allocate a fresh map. Modules do not have a prototype.
  Map* map;
  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
  if (!maybe_map->To(&map)) return maybe_map;
  // Allocate the object based on the map.
  JSModule* module;
  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
  if (!maybe_module->To(&module)) return maybe_module;
  module->set_context(context);
  module->set_scope_info(scope_info);
  return module;
}

MaybeObject* Heap::AllocateJSArrayAndStorage(
    ElementsKind elements_kind,
    int length,
    int capacity,
    ArrayStorageAllocationMode mode,
    PretenureFlag pretenure) {
  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
  JSArray* array;
  if (!maybe_array->To(&array)) return maybe_array;

  // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
  // for performance reasons.
  ASSERT(capacity >= length);

  if (capacity == 0) {
    array->set_length(Smi::FromInt(0));
    array->set_elements(empty_fixed_array());
    return array;
  }

  FixedArrayBase* elms;
  MaybeObject* maybe_elms = NULL;
  if (IsFastDoubleElementsKind(elements_kind)) {
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
    }
  } else {
    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedArrayWithHoles(capacity);
    }
  }
  if (!maybe_elms->To(&elms)) return maybe_elms;

  array->set_elements(elms);
  array->set_length(Smi::FromInt(length));
  return array;
}

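// Note: ArrayStorageAllocationMode distinguishes backing stores that will be
// fully overwritten right away (DONT_INITIALIZE_ARRAY_ELEMENTS, uninitialized
// storage) from stores that must be safe to expose with gaps
// (INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, hole-filled storage). The same
// branching appears again in AllocateJSArrayStorage below.
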
MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
    ElementsKind elements_kind,
    int length,
    int capacity,
    Handle<AllocationSite> allocation_site,
    ArrayStorageAllocationMode mode) {
  MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
                                                               allocation_site);
  JSArray* array;
  if (!maybe_array->To(&array)) return maybe_array;
  return AllocateJSArrayStorage(array, length, capacity, mode);
}

MaybeObject* Heap::AllocateJSArrayStorage(
    JSArray* array,
    int length,
    int capacity,
    ArrayStorageAllocationMode mode) {
  ASSERT(capacity >= length);

  if (capacity == 0) {
    array->set_length(Smi::FromInt(0));
    array->set_elements(empty_fixed_array());
    return array;
  }

  FixedArrayBase* elms;
  MaybeObject* maybe_elms = NULL;
  ElementsKind elements_kind = array->GetElementsKind();
  if (IsFastDoubleElementsKind(elements_kind)) {
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
    }
  } else {
    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
      maybe_elms = AllocateUninitializedFixedArray(capacity);
    } else {
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      maybe_elms = AllocateFixedArrayWithHoles(capacity);
    }
  }
  if (!maybe_elms->To(&elms)) return maybe_elms;

  array->set_elements(elms);
  array->set_length(Smi::FromInt(length));
  return array;
}

MaybeObject* Heap::AllocateJSArrayWithElements(
    FixedArrayBase* elements,
    ElementsKind elements_kind,
    int length,
    PretenureFlag pretenure) {
  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
  JSArray* array;
  if (!maybe_array->To(&array)) return maybe_array;

  array->set_elements(elements);
  array->set_length(Smi::FromInt(length));
  array->ValidateElements();
  return array;
}

MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
  // Allocate map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps. Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  JSProxy* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
  result->set_handler(handler);
  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
  return result;
}

MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
                                           Object* call_trap,
                                           Object* construct_trap,
                                           Object* prototype) {
  // Allocate map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps. Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj =
      AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  JSFunctionProxy* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
  result->set_handler(handler);
  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
  result->set_call_trap(call_trap);
  result->set_construct_trap(construct_trap);
  return result;
}

MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();
  ASSERT(map->is_dictionary_map());

  // Make sure no field properties are described in the initial map.
  // This guarantees us that normalizing the properties does not
  // require us to change property values to PropertyCells.
  ASSERT(map->NextFreePropertyIndex() == 0);

  // Make sure we don't have a ton of pre-allocated slots in the
  // global objects. They will be unused once we normalize the object.
  ASSERT(map->unused_property_fields() == 0);
  ASSERT(map->inobject_properties() == 0);

  // Initial size of the backing store to avoid resize of the storage during
  // bootstrapping. The size differs between the JS global object and the
  // builtins object.
  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;

  // Allocate a dictionary object for backing storage.
  NameDictionary* dictionary;
  MaybeObject* maybe_dictionary =
      NameDictionary::Allocate(
          this,
          map->NumberOfOwnDescriptors() * 2 + initial_size);
  if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;

  // The global object might be created from an object template with accessors.
  // Fill these accessors into the dictionary.
  DescriptorArray* descs = map->instance_descriptors();
  for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
    PropertyDetails details = descs->GetDetails(i);
    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
    PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
    Object* value = descs->GetCallbacksObject(i);
    MaybeObject* maybe_value = AllocatePropertyCell(value);
    if (!maybe_value->ToObject(&value)) return maybe_value;

    MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
    if (!maybe_added->To(&dictionary)) return maybe_added;
  }

  // Allocate the global object and initialize it with the backing store.
  JSObject* global;
  MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
  if (!maybe_global->To(&global)) return maybe_global;

  InitializeJSObjectFromMap(global, dictionary, map);

  // Create a new map for the global object.
  Map* new_map;
  MaybeObject* maybe_map = map->CopyDropDescriptors();
  if (!maybe_map->To(&new_map)) return maybe_map;
  new_map->set_dictionary_map(true);

  // Set up the global object as a normalized object.
  global->set_map(new_map);
  global->set_properties(dictionary);

  // Make sure result is a global object with properties in dictionary.
  ASSERT(global->IsGlobalObject());
  ASSERT(!global->HasFastProperties());
  return global;
}

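// Note: global objects are deliberately kept in dictionary (normalized)
// mode. The accessor descriptors from the template are copied into a
// NameDictionary whose values are PropertyCells, and the final map is a
// descriptor-free copy with dictionary_map set, which is why the function
// ends by asserting !global->HasFastProperties().
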
MaybeObject* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions.  If functions need to be copied we
  // have to be careful to clear the literals array.
  SLOW_ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    { MaybeObject* maybe_clone =
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    RecordWrites(clone_address,
                 JSObject::kHeaderSize,
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
  } else {
    wb_mode = SKIP_WRITE_BARRIER;

    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    SLOW_ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  SLOW_ASSERT(
      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem;
      if (elements->map() == fixed_cow_array_map()) {
        maybe_elem = FixedArray::cast(elements);
      } else if (source->HasFastDoubleElements()) {
        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
  }
  // Return the new clone.
  return clone;
}

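// Note: CopyJSObject picks the write barrier mode to match where the clone
// ended up. A new-space clone can have its elements/properties pointers
// stored with SKIP_WRITE_BARRIER, while the always_allocate() path may land
// in old space and therefore records writes for every field past the header
// via RecordWrites().
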
MaybeObject* Heap::CopyJSObjectWithAllocationSite(
    JSObject* source,
    AllocationSite* site) {
  // Never used to copy functions.  If functions need to be copied we
  // have to be careful to clear the literals array.
  SLOW_ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  ASSERT(map->CanTrackAllocationSite());
  ASSERT(map->instance_type() == JS_ARRAY_TYPE);
  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  int adjusted_object_size = object_size;
  if (always_allocate()) {
    // We'll only track origin if we are certain to allocate in new space.
    const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
    if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
      adjusted_object_size += AllocationMemento::kSize;
    }

    { MaybeObject* maybe_clone =
          AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    int write_barrier_offset = adjusted_object_size > object_size
        ? JSArray::kSize + AllocationMemento::kSize
        : JSObject::kHeaderSize;
    if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
      RecordWrites(clone_address,
                   write_barrier_offset,
                   (object_size - write_barrier_offset) / kPointerSize);
    }

    // Track allocation site information, if we failed to allocate it inline.
    if (InNewSpace(clone) &&
        adjusted_object_size == object_size) {
      MaybeObject* maybe_alloc_memento =
          AllocateStruct(ALLOCATION_MEMENTO_TYPE);
      AllocationMemento* alloc_memento;
      if (maybe_alloc_memento->To(&alloc_memento)) {
        alloc_memento->set_map_no_write_barrier(allocation_memento_map());
        alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
      }
    }
  } else {
    wb_mode = SKIP_WRITE_BARRIER;
    adjusted_object_size += AllocationMemento::kSize;

    { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    SLOW_ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  if (adjusted_object_size > object_size) {
    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
        reinterpret_cast<Address>(clone) + object_size);
    alloc_memento->set_map_no_write_barrier(allocation_memento_map());
    alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
  }

  SLOW_ASSERT(
      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem;
      if (elements->map() == fixed_cow_array_map()) {
        maybe_elem = FixedArray::cast(elements);
      } else if (source->HasFastDoubleElements()) {
        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
  }
  // Return the new clone.
  return clone;
}

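// Note: when the allocation-site-tracking clone is placed in new space, the
// AllocationMemento sits inline, directly behind the object. A hedged sketch
// of the layout arithmetic used above:
//
//   | JSArray (object_size bytes) | AllocationMemento (kSize bytes) |
//   ^ clone                       ^ clone + object_size
//
// The always_allocate() path only reserves the inline slot when the combined
// size is comfortably below the post-GC new-space minimum; if it did not and
// the clone still landed in new space, a separate ALLOCATION_MEMENTO struct
// is allocated instead.
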
MaybeObject* Heap::ReinitializeJSReceiver(
    JSReceiver* object, InstanceType type, int size) {
  ASSERT(type >= FIRST_JS_OBJECT_TYPE);

  // Allocate fresh map.
  // TODO(rossberg): Once we optimize proxies, cache these maps.
  Map* map;
  MaybeObject* maybe = AllocateMap(type, size);
  if (!maybe->To<Map>(&map)) return maybe;

  // Check that the receiver has at least the size of the fresh object.
  int size_difference = object->map()->instance_size() - map->instance_size();
  ASSERT(size_difference >= 0);

  map->set_prototype(object->map()->prototype());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  maybe = AllocateFixedArray(prop_size, TENURED);
  if (!maybe->ToObject(&properties)) return maybe;

  // Functions require some allocation, which might fail here.
  SharedFunctionInfo* shared = NULL;
  if (type == JS_FUNCTION_TYPE) {
    String* name;
    maybe =
        InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
    if (!maybe->To<String>(&name)) return maybe;
    maybe = AllocateSharedFunctionInfo(name);
    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
  }

  // Because of possible retries of this function after failure,
  // we must NOT fail after this point, where we have changed the type!

  // Reset the map for the object.
  object->set_map(map);
  JSObject* jsobj = JSObject::cast(object);

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);

  // Functions require some minimal initialization.
  if (type == JS_FUNCTION_TYPE) {
    map->set_function_with_prototype(true);
    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
    JSFunction::cast(object)->set_context(
        isolate()->context()->native_context());
  }

  // Put in filler if the new object is smaller than the old.
  if (size_difference > 0) {
    CreateFillerObjectAt(
        object->address() + map->instance_size(), size_difference);
  }

  return object;
}

MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                             JSGlobalProxy* object) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size and type as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());
  ASSERT(map->instance_type() == object->map()->instance_type());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}

MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
                                             PretenureFlag pretenure) {
  int length = string.length();
  if (length == 1) {
    return Heap::LookupSingleCharacterStringFromCode(string[0]);
  }
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawOneByteString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  CopyChars(SeqOneByteString::cast(result)->GetChars(),
            string.start(),
            length);
  return result;
}

MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                              int non_ascii_start,
                                              PretenureFlag pretenure) {
  // Continue counting the number of characters in the UTF-8 string, starting
  // from the first non-ascii character or word.
  Access<UnicodeCache::Utf8Decoder>
      decoder(isolate_->unicode_cache()->utf8_decoder());
  decoder->Reset(string.start() + non_ascii_start,
                 string.length() - non_ascii_start);
  int utf16_length = decoder->Utf16Length();
  ASSERT(utf16_length > 0);
  // Allocate string.
  Object* result;
  {
    int chars = non_ascii_start + utf16_length;
    MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Convert and copy the characters into the new object.
  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
  // Copy ascii portion.
  uint16_t* data = twobyte->GetChars();
  if (non_ascii_start != 0) {
    const char* ascii_data = string.start();
    for (int i = 0; i < non_ascii_start; i++) {
      *data++ = *ascii_data++;
    }
  }
  // Now write the remainder.
  decoder->WriteUtf16(data, utf16_length);
  return result;
}

MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  Object* result;
  int length = string.length();
  const uc16* start = string.start();

  if (String::IsOneByte(start, length)) {
    MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
    CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
  } else {  // It's not a one byte string.
    MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
    CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
  }
  return result;
}

Map* Heap::InternalizedStringMapForString(String* string) {
  // If the string is in new space it cannot be used as internalized.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding internalized string map for strings.
  switch (string->map()->instance_type()) {
    case STRING_TYPE: return internalized_string_map();
    case ASCII_STRING_TYPE: return ascii_internalized_string_map();
    case CONS_STRING_TYPE: return cons_internalized_string_map();
    case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
    case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
    case EXTERNAL_ASCII_STRING_TYPE:
      return external_ascii_internalized_string_map();
    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
      return external_internalized_string_with_one_byte_data_map();
    case SHORT_EXTERNAL_STRING_TYPE:
      return short_external_internalized_string_map();
    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
      return short_external_ascii_internalized_string_map();
    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
      return short_external_internalized_string_with_one_byte_data_map();
    default: return NULL;  // No match found.
  }
}

static inline void WriteOneByteData(Vector<const char> vector,
                                    uint8_t* chars,
                                    int len) {
  // Only works for ascii.
  ASSERT(vector.length() == len);
  OS::MemCopy(chars, vector.start(), len);
}


static inline void WriteTwoByteData(Vector<const char> vector,
                                    uint16_t* chars,
                                    int len) {
  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
  unsigned stream_length = vector.length();
  while (stream_length != 0) {
    unsigned consumed = 0;
    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
    ASSERT(c != unibrow::Utf8::kBadChar);
    ASSERT(consumed <= stream_length);
    stream_length -= consumed;
    stream += consumed;
    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
      len -= 2;
      if (len < 0) break;
      *chars++ = unibrow::Utf16::LeadSurrogate(c);
      *chars++ = unibrow::Utf16::TrailSurrogate(c);
    } else {
      len -= 1;
      if (len < 0) break;
      *chars++ = c;
    }
  }
  ASSERT(stream_length == 0);
  ASSERT(len == 0);
}

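// Note: WriteTwoByteData above decodes the UTF-8 input with
// unibrow::Utf8::ValueOf and expands any code point beyond the BMP into a
// UTF-16 surrogate pair, which is why such characters consume two output
// slots. The String* overloads below simply flatten an existing string with
// String::WriteToFlat.
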
static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
  ASSERT(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}


static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
  ASSERT(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}

template<bool is_one_byte, typename T>
MaybeObject* Heap::AllocateInternalizedStringImpl(
    T t, int chars, uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Compute map and object size.
  int size;
  Map* map;

  if (is_one_byte) {
    if (chars > SeqOneByteString::kMaxLength) {
      return Failure::OutOfMemoryException(0x9);
    }
    map = ascii_internalized_string_map();
    size = SeqOneByteString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException(0xa);
    }
    map = internalized_string_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
        ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
        : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  if (is_one_byte) {
    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
  } else {
    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
  }
  return answer;
}

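// Note: the size computed in AllocateInternalizedStringImpl is the sequential
// string header plus one byte (one-byte strings) or two bytes (two-byte
// strings) per character, rounded up to pointer alignment by
// SeqOneByteString::SizeFor() / SeqTwoByteString::SizeFor(). Internalized
// strings are never allocated in new space here: they go to old data space,
// or to the large object space once the size exceeds the page limit.
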
// Need explicit instantiations.
template
MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
template
MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
    String*, int, uint32_t);
template
MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
    Vector<const char>, int, uint32_t);

MaybeObject* Heap::AllocateRawOneByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqOneByteString::kMaxLength) {
    return Failure::OutOfMemoryException(0xb);
  }

  int size = SeqOneByteString::SizeFor(length);
  ASSERT(size <= SeqOneByteString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE &&
             size > Page::kMaxNonCodeHeapObjectSize) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());

  return result;
}

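// Note: AllocateRawOneByteString (and its two-byte twin below) first picks a
// preferred space from the pretenure flag and then adjusts it by size:
// objects too large for new space go straight to LO_SPACE, while page-sized
// objects keep NEW_SPACE but set retry_space to LO_SPACE so that the retry
// after a failed new-space allocation lands in the right place.
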
MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException(0xc);
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE &&
             size > Page::kMaxNonCodeHeapObjectSize) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}

MaybeObject* Heap::AllocateJSArray(
    ElementsKind elements_kind,
    PretenureFlag pretenure) {
  Context* native_context = isolate()->context()->native_context();
  JSFunction* array_function = native_context->array_function();
  Map* map = array_function->initial_map();
  Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
  if (transition_map != NULL) map = transition_map;
  return AllocateJSObjectFromMap(map, pretenure);
}

MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
    ElementsKind elements_kind,
    Handle<AllocationSite> allocation_site) {
  Context* native_context = isolate()->context()->native_context();
  JSFunction* array_function = native_context->array_function();
  Map* map = array_function->initial_map();
  Object* maybe_map_array = native_context->js_array_maps();
  if (!maybe_map_array->IsUndefined()) {
    Object* maybe_transitioned_map =
        FixedArray::cast(maybe_map_array)->get(elements_kind);
    if (!maybe_transitioned_map->IsUndefined()) {
      map = Map::cast(maybe_transitioned_map);
    }
  }
  return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
}

MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
      fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}

MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
  return AllocateExternalArray(0, array_type, NULL, TENURED);
}

MaybeObject* Heap::AllocateRawFixedArray(int length) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException(0xd);
  }
  ASSERT(length > 0);
  // Use the general function if we're forced to always allocate.
  if (always_allocate()) return AllocateFixedArray(length, TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
  return size <= kMaxObjectSizeInNewSpace
      ? new_space_.AllocateRaw(size)
      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
}

MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map_no_write_barrier(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map_no_write_barrier(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content.
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}

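// Note: CopyFixedArrayWithMap has two copy paths. If the destination landed
// in new space the body is copied with a raw CopyBlock and no write barrier;
// otherwise the elements are copied one by one with the write barrier mode
// reported by GetWriteBarrierMode(), under a DisallowHeapAllocation scope.
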
MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                               Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  HeapObject* dst = HeapObject::cast(obj);
  dst->set_map_no_write_barrier(map);
  CopyBlock(
      dst->address() + FixedDoubleArray::kLengthOffset,
      src->address() + FixedDoubleArray::kLengthOffset,
      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
  return obj;
}

MaybeObject* Heap::AllocateFixedArray(int length) {
|
2009-08-19 07:30:20 +00:00
|
|
|
ASSERT(length >= 0);
|
2009-01-12 10:59:58 +00:00
|
|
|
if (length == 0) return empty_fixed_array();
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result = AllocateRawFixedArray(length);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
2008-10-20 06:35:28 +00:00
|
|
|
}
|
2010-10-25 15:22:03 +00:00
|
|
|
// Initialize header.
|
|
|
|
FixedArray* array = reinterpret_cast<FixedArray*>(result);
|
2011-12-07 08:43:18 +00:00
|
|
|
array->set_map_no_write_barrier(fixed_array_map());
|
2010-10-25 15:22:03 +00:00
|
|
|
array->set_length(length);
|
|
|
|
// Initialize body.
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(!InNewSpace(undefined_value()));
|
2010-10-25 15:22:03 +00:00
|
|
|
MemsetPointer(array->data_start(), undefined_value(), length);
|
2008-10-20 06:35:28 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
|
2010-01-07 13:17:18 +00:00
|
|
|
if (length < 0 || length > FixedArray::kMaxLength) {
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0xe);
|
2010-01-07 13:17:18 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2010-01-05 11:30:05 +00:00
|
|
|
AllocationSpace space =
|
|
|
|
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
|
2008-07-03 15:10:15 +00:00
|
|
|
int size = FixedArray::SizeFor(length);
|
2010-01-05 11:30:05 +00:00
|
|
|
if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
|
|
|
|
// Too big for new space.
|
|
|
|
space = LO_SPACE;
|
|
|
|
} else if (space == OLD_POINTER_SPACE &&
|
2012-02-23 12:11:24 +00:00
|
|
|
size > Page::kMaxNonCodeHeapObjectSize) {
|
2010-01-05 11:30:05 +00:00
|
|
|
// Too big for old pointer space.
|
|
|
|
space = LO_SPACE;
|
|
|
|
}
|
|
|
|
|
2010-05-27 12:30:45 +00:00
|
|
|
AllocationSpace retry_space =
|
2012-02-23 12:11:24 +00:00
|
|
|
(size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
|
2010-05-27 12:30:45 +00:00
|
|
|
|
|
|
|
return AllocateRaw(size, space, retry_space);
|
2010-04-14 14:46:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
|
2011-03-18 20:35:07 +00:00
|
|
|
Heap* heap,
|
2010-10-25 15:22:03 +00:00
|
|
|
int length,
|
|
|
|
PretenureFlag pretenure,
|
|
|
|
Object* filler) {
|
2010-04-14 14:46:15 +00:00
|
|
|
ASSERT(length >= 0);
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(heap->empty_fixed_array()->IsFixedArray());
|
|
|
|
if (length == 0) return heap->empty_fixed_array();
|
2010-04-14 14:46:15 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
ASSERT(!heap->InNewSpace(filler));
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2010-01-05 11:30:05 +00:00
|
|
|
|
2011-12-07 08:43:18 +00:00
|
|
|
HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
|
2008-07-03 15:10:15 +00:00
|
|
|
FixedArray* array = FixedArray::cast(result);
|
|
|
|
array->set_length(length);
|
2010-04-14 14:46:15 +00:00
|
|
|
MemsetPointer(array->data_start(), filler, length);
|
2008-07-03 15:10:15 +00:00
|
|
|
return array;
|
|
|
|
}
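// --- Illustrative sketch, not part of the original heap.cc ----------------
// MemsetPointer above initializes every element slot of the new array with
// the same filler value (undefined or the hole). Its effect is equivalent to
// a plain pointer fill loop (the "Sketch" name is hypothetical):
static inline void SketchFillSlots(void** slots, void* filler, int length) {
  for (int i = 0; i < length; i++) slots[i] = filler;  // one store per slot
}
// ---------------------------------------------------------------------------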
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
|
2011-03-18 20:35:07 +00:00
|
|
|
return AllocateFixedArrayWithFiller(this,
|
|
|
|
length,
|
|
|
|
pretenure,
|
|
|
|
undefined_value());
|
2010-04-14 14:46:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
|
|
|
|
PretenureFlag pretenure) {
|
2011-03-18 20:35:07 +00:00
|
|
|
return AllocateFixedArrayWithFiller(this,
|
|
|
|
length,
|
|
|
|
pretenure,
|
|
|
|
the_hole_value());
|
2010-04-14 14:46:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
|
2010-03-04 14:03:08 +00:00
|
|
|
if (length == 0) return empty_fixed_array();
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* obj;
|
|
|
|
{ MaybeObject* maybe_obj = AllocateRawFixedArray(length);
|
|
|
|
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
|
|
|
|
}
|
2010-03-04 14:03:08 +00:00
|
|
|
|
2011-12-07 08:43:18 +00:00
|
|
|
reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
|
|
|
|
fixed_array_map());
|
2010-03-04 14:03:08 +00:00
|
|
|
FixedArray::cast(obj)->set_length(length);
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-09 10:03:35 +00:00
|
|
|
MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
|
|
|
|
int size = FixedDoubleArray::SizeFor(0);
|
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result =
|
|
|
|
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
// Initialize the object.
|
2011-12-07 08:43:18 +00:00
|
|
|
reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
|
2011-06-09 10:03:35 +00:00
|
|
|
fixed_double_array_map());
|
|
|
|
reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
|
|
|
|
int length,
|
|
|
|
PretenureFlag pretenure) {
|
2012-02-10 12:36:05 +00:00
|
|
|
if (length == 0) return empty_fixed_array();
|
2011-06-09 10:03:35 +00:00
|
|
|
|
2012-01-26 21:47:57 +00:00
|
|
|
Object* elements_object;
|
|
|
|
MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
|
|
|
|
if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
|
|
|
|
FixedDoubleArray* elements =
|
|
|
|
reinterpret_cast<FixedDoubleArray*>(elements_object);
|
|
|
|
|
|
|
|
elements->set_map_no_write_barrier(fixed_double_array_map());
|
|
|
|
elements->set_length(length);
|
|
|
|
return elements;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
|
|
|
|
int length,
|
|
|
|
PretenureFlag pretenure) {
|
2012-02-10 12:36:05 +00:00
|
|
|
if (length == 0) return empty_fixed_array();
|
2012-01-26 21:47:57 +00:00
|
|
|
|
|
|
|
Object* elements_object;
|
|
|
|
MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
|
|
|
|
if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
|
|
|
|
FixedDoubleArray* elements =
|
|
|
|
reinterpret_cast<FixedDoubleArray*>(elements_object);
|
|
|
|
|
|
|
|
for (int i = 0; i < length; ++i) {
|
|
|
|
elements->set_the_hole(i);
|
2011-06-09 10:03:35 +00:00
|
|
|
}
|
|
|
|
|
2012-01-26 21:47:57 +00:00
|
|
|
elements->set_map_no_write_barrier(fixed_double_array_map());
|
|
|
|
elements->set_length(length);
|
|
|
|
return elements;
|
2011-06-09 10:03:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
|
|
|
|
PretenureFlag pretenure) {
|
|
|
|
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
|
2013-01-09 12:29:06 +00:00
|
|
|
return Failure::OutOfMemoryException(0xf);
|
2011-06-09 10:03:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
AllocationSpace space =
|
|
|
|
(pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
|
|
|
|
int size = FixedDoubleArray::SizeFor(length);
|
2012-04-17 07:52:39 +00:00
|
|
|
|
|
|
|
#ifndef V8_HOST_ARCH_64_BIT
|
|
|
|
size += kPointerSize;
|
|
|
|
#endif
|
|
|
|
|
2011-06-09 10:03:35 +00:00
|
|
|
if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
|
|
|
|
// Too big for new space.
|
|
|
|
space = LO_SPACE;
|
|
|
|
} else if (space == OLD_DATA_SPACE &&
|
2012-02-23 12:11:24 +00:00
|
|
|
size > Page::kMaxNonCodeHeapObjectSize) {
|
2011-06-09 10:03:35 +00:00
|
|
|
// Too big for old data space.
|
|
|
|
space = LO_SPACE;
|
|
|
|
}
|
|
|
|
|
|
|
|
AllocationSpace retry_space =
|
2012-02-23 12:11:24 +00:00
|
|
|
(size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
|
2011-06-09 10:03:35 +00:00
|
|
|
|
2012-04-17 07:52:39 +00:00
|
|
|
HeapObject* object;
|
|
|
|
{ MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
|
|
|
|
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
|
|
|
|
}
|
|
|
|
|
|
|
|
return EnsureDoubleAligned(this, object, size);
|
2011-06-09 10:03:35 +00:00
|
|
|
}
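// --- Illustrative sketch, not part of the original heap.cc ----------------
// On 32-bit hosts the extra kPointerSize requested above gives
// EnsureDoubleAligned the slack it needs to move the object onto an 8-byte
// boundary. Rounding an address up to 8 bytes is the usual mask trick
// (the "Sketch" name is hypothetical):
static inline uintptr_t SketchAlignUpTo8(uintptr_t address) {
  return (address + 7) & ~static_cast<uintptr_t>(7);  // round up to 8 bytes
}
// ---------------------------------------------------------------------------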
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
|
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2011-12-07 08:43:18 +00:00
|
|
|
reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
|
|
|
|
hash_table_map());
|
2009-07-02 06:50:43 +00:00
|
|
|
ASSERT(result->IsHashTable());
|
2008-07-03 15:10:15 +00:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-28 13:52:31 +00:00
|
|
|
MaybeObject* Heap::AllocateSymbol() {
|
2013-03-01 10:34:31 +00:00
|
|
|
// Statically ensure that it is safe to allocate symbols in paged spaces.
|
|
|
|
STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
|
|
|
|
|
|
|
|
Object* result;
|
2013-03-28 13:52:31 +00:00
|
|
|
MaybeObject* maybe =
|
|
|
|
AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
|
2013-03-01 10:34:31 +00:00
|
|
|
if (!maybe->ToObject(&result)) return maybe;
|
|
|
|
|
|
|
|
HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
|
|
|
|
|
|
|
|
// Generate a random hash value.
|
|
|
|
int hash;
|
|
|
|
int attempts = 0;
|
|
|
|
do {
|
|
|
|
hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
|
|
|
|
attempts++;
|
|
|
|
} while (hash == 0 && attempts < 30);
|
|
|
|
if (hash == 0) hash = 1; // never return 0
|
|
|
|
|
|
|
|
Symbol::cast(result)->set_hash_field(
|
|
|
|
Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
|
2013-03-22 16:51:28 +00:00
|
|
|
Symbol::cast(result)->set_name(undefined_value());
|
2013-03-01 10:34:31 +00:00
|
|
|
|
|
|
|
ASSERT(result->IsSymbol());
|
|
|
|
return result;
|
|
|
|
}
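// --- Illustrative sketch, not part of the original heap.cc ----------------
// The loop above keeps drawing random bits until a non-zero hash appears
// (with a bounded number of attempts), because 0 is reserved. The same
// pattern, written against a caller-supplied random source ("Sketch" names
// are hypothetical):
static inline int SketchNonZeroHash(int (*next_random)(), int mask) {
  int hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; attempts++) {
    hash = next_random() & mask;  // mask down to the valid hash bits
  }
  return hash == 0 ? 1 : hash;  // never return 0
}
// ---------------------------------------------------------------------------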
|
|
|
|
|
|
|
|
|
2012-08-17 09:03:08 +00:00
|
|
|
MaybeObject* Heap::AllocateNativeContext() {
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result =
|
2012-08-17 09:03:08 +00:00
|
|
|
AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
2012-08-17 09:03:08 +00:00
|
|
|
context->set_map_no_write_barrier(native_context_map());
|
2012-05-23 14:24:29 +00:00
|
|
|
context->set_js_array_maps(undefined_value());
|
2012-08-17 09:03:08 +00:00
|
|
|
ASSERT(context->IsNativeContext());
|
2008-07-03 15:10:15 +00:00
|
|
|
ASSERT(result->IsContext());
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-27 09:40:26 +00:00
|
|
|
MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
|
|
|
|
ScopeInfo* scope_info) {
|
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result =
|
|
|
|
AllocateFixedArray(scope_info->ContextLength(), TENURED);
|
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
|
|
|
context->set_map_no_write_barrier(global_context_map());
|
|
|
|
context->set_closure(function);
|
2012-08-28 11:25:08 +00:00
|
|
|
context->set_previous(function->context());
|
2012-08-27 09:40:26 +00:00
|
|
|
context->set_extension(scope_info);
|
2012-08-28 11:25:08 +00:00
|
|
|
context->set_global_object(function->context()->global_object());
|
2012-08-27 09:40:26 +00:00
|
|
|
ASSERT(context->IsGlobalContext());
|
|
|
|
ASSERT(result->IsContext());
|
|
|
|
return context;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-07-09 08:59:03 +00:00
|
|
|
MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
|
2012-04-16 14:43:27 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result =
|
2012-07-09 08:59:03 +00:00
|
|
|
AllocateFixedArray(scope_info->ContextLength(), TENURED);
|
2012-04-16 14:43:27 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
|
|
|
context->set_map_no_write_barrier(module_context_map());
|
2012-11-22 10:25:22 +00:00
|
|
|
// Instance link will be set later.
|
2012-07-09 08:59:03 +00:00
|
|
|
context->set_extension(Smi::FromInt(0));
|
2012-04-16 14:43:27 +00:00
|
|
|
return context;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
|
2008-07-03 15:10:15 +00:00
|
|
|
ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = AllocateFixedArray(length);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
2011-12-07 08:43:18 +00:00
|
|
|
context->set_map_no_write_barrier(function_context_map());
|
2008-07-03 15:10:15 +00:00
|
|
|
context->set_closure(function);
|
2011-06-09 12:45:26 +00:00
|
|
|
context->set_previous(function->context());
|
2012-07-09 08:59:03 +00:00
|
|
|
context->set_extension(Smi::FromInt(0));
|
2012-08-17 12:59:00 +00:00
|
|
|
context->set_global_object(function->context()->global_object());
|
2011-06-09 11:26:01 +00:00
|
|
|
return context;
|
|
|
|
}
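// --- Illustrative sketch, not part of the original heap.cc ----------------
// Each context allocated by this family of functions is linked to its
// enclosing context through set_previous, so scope lookup can walk outwards
// along that chain. Stripped of the heap details, the chain is just a singly
// linked list ("Sketch" names are hypothetical):
struct SketchContext {
  SketchContext* previous;  // enclosing scope; NULL for the outermost one
};
static inline int SketchContextDepth(const SketchContext* context) {
  int depth = 0;
  for (; context != NULL; context = context->previous) depth++;
  return depth;
}
// ---------------------------------------------------------------------------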
|
|
|
|
|
|
|
|
|
2011-06-29 07:41:42 +00:00
|
|
|
MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
|
|
|
|
Context* previous,
|
2011-06-16 06:37:49 +00:00
|
|
|
String* name,
|
|
|
|
Object* thrown_object) {
|
|
|
|
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
|
2011-06-09 11:26:01 +00:00
|
|
|
Object* result;
|
2011-06-16 06:37:49 +00:00
|
|
|
{ MaybeObject* maybe_result =
|
|
|
|
AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
|
2011-06-09 11:26:01 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
2011-12-07 08:43:18 +00:00
|
|
|
context->set_map_no_write_barrier(catch_context_map());
|
2011-06-29 07:41:42 +00:00
|
|
|
context->set_closure(function);
|
2011-06-09 11:26:01 +00:00
|
|
|
context->set_previous(previous);
|
2011-06-16 06:37:49 +00:00
|
|
|
context->set_extension(name);
|
2012-08-17 12:59:00 +00:00
|
|
|
context->set_global_object(previous->global_object());
|
2011-06-16 06:37:49 +00:00
|
|
|
context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
|
2011-06-09 11:26:01 +00:00
|
|
|
return context;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-29 07:41:42 +00:00
|
|
|
MaybeObject* Heap::AllocateWithContext(JSFunction* function,
|
|
|
|
Context* previous,
|
2013-07-19 14:07:23 +00:00
|
|
|
JSReceiver* extension) {
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
2011-12-07 08:43:18 +00:00
|
|
|
context->set_map_no_write_barrier(with_context_map());
|
2011-06-29 07:41:42 +00:00
|
|
|
context->set_closure(function);
|
2008-07-03 15:10:15 +00:00
|
|
|
context->set_previous(previous);
|
|
|
|
context->set_extension(extension);
|
2012-08-17 12:59:00 +00:00
|
|
|
context->set_global_object(previous->global_object());
|
2011-06-09 11:26:01 +00:00
|
|
|
return context;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-08-11 16:29:28 +00:00
|
|
|
MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
|
|
|
|
Context* previous,
|
2011-11-03 10:36:55 +00:00
|
|
|
ScopeInfo* scope_info) {
|
2011-08-11 16:29:28 +00:00
|
|
|
Object* result;
|
|
|
|
{ MaybeObject* maybe_result =
|
2011-11-03 10:36:55 +00:00
|
|
|
AllocateFixedArrayWithHoles(scope_info->ContextLength());
|
2011-08-11 16:29:28 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
|
|
|
Context* context = reinterpret_cast<Context*>(result);
|
2011-12-07 08:43:18 +00:00
|
|
|
context->set_map_no_write_barrier(block_context_map());
|
2011-08-11 16:29:28 +00:00
|
|
|
context->set_closure(function);
|
|
|
|
context->set_previous(previous);
|
|
|
|
context->set_extension(scope_info);
|
2012-08-17 12:59:00 +00:00
|
|
|
context->set_global_object(previous->global_object());
|
2011-08-11 16:29:28 +00:00
|
|
|
return context;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-11-03 10:36:55 +00:00
|
|
|
MaybeObject* Heap::AllocateScopeInfo(int length) {
|
|
|
|
FixedArray* scope_info;
|
|
|
|
MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
|
|
|
|
if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
|
2011-12-07 08:43:18 +00:00
|
|
|
scope_info->set_map_no_write_barrier(scope_info_map());
|
2011-08-11 16:29:28 +00:00
|
|
|
return scope_info;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-13 12:27:03 +00:00
|
|
|
MaybeObject* Heap::AllocateExternal(void* value) {
|
|
|
|
Foreign* foreign;
|
|
|
|
{ MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
|
|
|
|
if (!maybe_result->To(&foreign)) return maybe_result;
|
|
|
|
}
|
|
|
|
JSObject* external;
|
|
|
|
{ MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
|
|
|
|
if (!maybe_result->To(&external)) return maybe_result;
|
|
|
|
}
|
|
|
|
external->SetInternalField(0, foreign);
|
|
|
|
return external;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-10-25 15:22:03 +00:00
|
|
|
MaybeObject* Heap::AllocateStruct(InstanceType type) {
|
2008-07-03 15:10:15 +00:00
|
|
|
Map* map;
|
|
|
|
switch (type) {
|
2011-03-18 20:35:07 +00:00
|
|
|
#define MAKE_CASE(NAME, Name, name) \
|
|
|
|
case NAME##_TYPE: map = name##_map(); break;
|
2008-07-03 15:10:15 +00:00
|
|
|
STRUCT_LIST(MAKE_CASE)
|
|
|
|
#undef MAKE_CASE
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
return Failure::InternalError();
|
|
|
|
}
|
|
|
|
int size = map->instance_size();
|
|
|
|
AllocationSpace space =
|
2012-02-23 12:11:24 +00:00
|
|
|
(size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* result;
|
2011-03-18 20:35:07 +00:00
|
|
|
{ MaybeObject* maybe_result = Allocate(map, space);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_result->ToObject(&result)) return maybe_result;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
Struct::cast(result)->InitializeBody(size);
|
|
|
|
return result;
|
|
|
|
}
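// --- Illustrative sketch, not part of the original heap.cc ----------------
// The MAKE_CASE expansion above is the classic X-macro pattern: one list
// macro enumerates the entries and a small per-use macro stamps out a switch
// case (or declaration) for each of them. A tiny standalone version of the
// same idea ("SKETCH" names are hypothetical):
#define SKETCH_COLOR_LIST(V) V(RED) V(GREEN) V(BLUE)
#define SKETCH_DECLARE(name) SKETCH_##name,
enum SketchColor { SKETCH_COLOR_LIST(SKETCH_DECLARE) SKETCH_COLOR_COUNT };
#undef SKETCH_DECLARE
// ---------------------------------------------------------------------------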
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
bool Heap::IsHeapIterable() {
|
|
|
|
return (!old_pointer_space()->was_swept_conservatively() &&
|
|
|
|
!old_data_space()->was_swept_conservatively());
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::EnsureHeapIsIterable() {
|
2013-06-03 15:32:22 +00:00
|
|
|
ASSERT(AllowHeapAllocation::IsAllowed());
|
2011-09-19 18:36:47 +00:00
|
|
|
if (!IsHeapIterable()) {
|
2012-02-03 14:16:40 +00:00
|
|
|
CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
|
2011-09-19 18:36:47 +00:00
|
|
|
}
|
|
|
|
ASSERT(IsHeapIterable());
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-03-20 13:29:49 +00:00
|
|
|
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
|
2012-04-03 07:32:19 +00:00
|
|
|
incremental_marking()->Step(step_size,
|
|
|
|
IncrementalMarking::NO_GC_VIA_STACK_GUARD);
|
2012-03-20 13:29:49 +00:00
|
|
|
|
|
|
|
if (incremental_marking()->IsComplete()) {
|
|
|
|
bool uncommit = false;
|
|
|
|
if (gc_count_at_last_idle_gc_ == gc_count_) {
|
|
|
|
// No GC since the last full GC; the mutator is probably not active.
|
|
|
|
isolate_->compilation_cache()->Clear();
|
|
|
|
uncommit = true;
|
|
|
|
}
|
|
|
|
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
|
2013-06-14 14:24:03 +00:00
|
|
|
mark_sweeps_since_idle_round_started_++;
|
2012-03-20 13:29:49 +00:00
|
|
|
gc_count_at_last_idle_gc_ = gc_count_;
|
|
|
|
if (uncommit) {
|
|
|
|
new_space_.Shrink();
|
|
|
|
UncommitFromSpace();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-11-30 11:13:36 +00:00
|
|
|
bool Heap::IdleNotification(int hint) {
|
2012-06-22 08:33:43 +00:00
|
|
|
// Hints greater than this value indicate that
|
|
|
|
// the embedder is requesting a lot of GC work.
|
2012-03-23 13:33:11 +00:00
|
|
|
const int kMaxHint = 1000;
|
2013-06-14 14:24:03 +00:00
|
|
|
const int kMinHintForIncrementalMarking = 10;
|
2012-06-22 08:33:43 +00:00
|
|
|
// Minimal hint that allows doing a full GC.
|
|
|
|
const int kMinHintForFullGC = 100;
|
2012-04-30 14:41:12 +00:00
|
|
|
intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
|
|
|
|
// The size factor is in range [5..250]. The numbers here are chosen from
|
|
|
|
// experiments. If you change them, make sure to test with
|
|
|
|
// chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
|
2012-09-26 11:35:42 +00:00
|
|
|
intptr_t step_size =
|
|
|
|
size_factor * IncrementalMarking::kAllocatedThreshold;
|
2012-03-20 13:29:49 +00:00
|
|
|
|
|
|
|
if (contexts_disposed_ > 0) {
|
2012-03-23 13:33:11 +00:00
|
|
|
if (hint >= kMaxHint) {
|
|
|
|
// The embedder is requesting a lot of GC work after context disposal;
|
|
|
|
// we age inline caches so that they don't keep objects from
|
|
|
|
// the old context alive.
|
|
|
|
AgeInlineCaches();
|
|
|
|
}
|
2012-03-20 13:29:49 +00:00
|
|
|
int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
|
2012-11-30 09:42:20 +00:00
|
|
|
if (hint >= mark_sweep_time && !FLAG_expose_gc &&
|
|
|
|
incremental_marking()->IsStopped()) {
|
2012-03-20 13:29:49 +00:00
|
|
|
HistogramTimerScope scope(isolate_->counters()->gc_context());
|
|
|
|
CollectAllGarbage(kReduceMemoryFootprintMask,
|
|
|
|
"idle notification: contexts disposed");
|
|
|
|
} else {
|
|
|
|
AdvanceIdleIncrementalMarking(step_size);
|
|
|
|
contexts_disposed_ = 0;
|
|
|
|
}
|
2012-04-30 14:41:12 +00:00
|
|
|
// After context disposal there is likely a lot of garbage remaining; reset
|
|
|
|
// the idle notification counters in order to trigger more incremental GCs
|
|
|
|
// on subsequent idle notifications.
|
|
|
|
StartIdleRound();
|
2012-03-20 13:29:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-11-30 09:42:20 +00:00
|
|
|
if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
|
2012-03-20 13:29:49 +00:00
|
|
|
return IdleGlobalGC();
|
2011-11-30 11:13:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// By doing small chunks of GC work in each IdleNotification, we
|
|
|
|
// perform a round of incremental GCs and after that wait until
|
|
|
|
// the mutator creates enough garbage to justify a new round.
|
|
|
|
// An incremental GC progresses as follows:
|
|
|
|
// 1. many incremental marking steps,
|
|
|
|
// 2. one old space mark-sweep-compact,
|
|
|
|
// 3. many lazy sweep steps.
|
|
|
|
// Use mark-sweep-compact events to count incremental GCs in a round.
|
|
|
|
|
|
|
|
if (incremental_marking()->IsStopped()) {
|
2013-01-30 12:19:32 +00:00
|
|
|
if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
|
|
|
|
!IsSweepingComplete() &&
|
2011-11-30 16:55:55 +00:00
|
|
|
!AdvanceSweepers(static_cast<int>(step_size))) {
|
2011-11-30 11:13:36 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
|
|
|
|
if (EnoughGarbageSinceLastIdleRound()) {
|
|
|
|
StartIdleRound();
|
|
|
|
} else {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-22 08:33:43 +00:00
|
|
|
int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
|
|
|
|
mark_sweeps_since_idle_round_started_;
|
|
|
|
|
2011-11-30 11:13:36 +00:00
|
|
|
if (incremental_marking()->IsStopped()) {
|
2012-06-22 08:33:43 +00:00
|
|
|
// If there are no more than two GCs left in this idle round and we are
|
|
|
|
// allowed to do a full GC, then make those GCs full in order to compact
|
|
|
|
// the code space.
|
|
|
|
// TODO(ulan): Once we enable code compaction for incremental marking,
|
|
|
|
// we can get rid of this special case and always start incremental marking.
|
|
|
|
if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
|
|
|
|
CollectAllGarbage(kReduceMemoryFootprintMask,
|
|
|
|
"idle notification: finalize idle round");
|
2013-06-14 14:24:03 +00:00
|
|
|
mark_sweeps_since_idle_round_started_++;
|
|
|
|
} else if (hint > kMinHintForIncrementalMarking) {
|
2012-06-22 08:33:43 +00:00
|
|
|
incremental_marking()->Start();
|
|
|
|
}
|
|
|
|
}
|
2013-06-14 14:24:03 +00:00
|
|
|
if (!incremental_marking()->IsStopped() &&
|
|
|
|
hint > kMinHintForIncrementalMarking) {
|
2012-06-22 08:33:43 +00:00
|
|
|
AdvanceIdleIncrementalMarking(step_size);
|
2011-11-30 11:13:36 +00:00
|
|
|
}
|
2013-06-14 14:24:03 +00:00
|
|
|
|
|
|
|
if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
|
|
|
|
FinishIdleRound();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2011-11-30 11:13:36 +00:00
|
|
|
return false;
|
|
|
|
}
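// --- Illustrative sketch, not part of the original heap.cc ----------------
// The hint handling above clamps the embedder's hint into [20..kMaxHint] and
// divides by 4, which is why the comment can promise a size factor in the
// range [5..250] ("Sketch" names are hypothetical):
static inline int SketchIdleSizeFactor(int hint) {
  const int kSketchMaxHint = 1000;
  int clamped = hint < 20 ? 20 : (hint > kSketchMaxHint ? kSketchMaxHint : hint);
  return clamped / 4;  // 5 when hint <= 20, 250 when hint >= kSketchMaxHint
}
// ---------------------------------------------------------------------------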
|
|
|
|
|
|
|
|
|
|
|
|
bool Heap::IdleGlobalGC() {
|
2009-09-09 11:21:54 +00:00
|
|
|
static const int kIdlesBeforeScavenge = 4;
|
|
|
|
static const int kIdlesBeforeMarkSweep = 7;
|
|
|
|
static const int kIdlesBeforeMarkCompact = 8;
|
2010-12-13 12:14:30 +00:00
|
|
|
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
|
2011-02-21 16:11:46 +00:00
|
|
|
static const unsigned int kGCsBetweenCleanup = 4;
|
2011-03-18 20:35:07 +00:00
|
|
|
|
|
|
|
if (!last_idle_notification_gc_count_init_) {
|
|
|
|
last_idle_notification_gc_count_ = gc_count_;
|
|
|
|
last_idle_notification_gc_count_init_ = true;
|
|
|
|
}
|
2009-08-26 08:13:27 +00:00
|
|
|
|
2010-03-09 09:41:58 +00:00
|
|
|
bool uncommit = true;
|
2009-08-26 08:13:27 +00:00
|
|
|
bool finished = false;
|
|
|
|
|
2010-12-13 12:14:30 +00:00
|
|
|
// Reset the number of idle notifications received when a number of
|
|
|
|
// GCs have taken place. This allows another round of cleanup based
|
|
|
|
// on idle notifications if enough work has been carried out to
|
|
|
|
// provoke a number of garbage collections.
|
2011-03-18 20:35:07 +00:00
|
|
|
if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
|
|
|
|
number_idle_notifications_ =
|
|
|
|
Min(number_idle_notifications_ + 1, kMaxIdleCount);
|
2009-08-26 08:13:27 +00:00
|
|
|
} else {
|
2011-03-18 20:35:07 +00:00
|
|
|
number_idle_notifications_ = 0;
|
|
|
|
last_idle_notification_gc_count_ = gc_count_;
|
2009-08-26 08:13:27 +00:00
|
|
|
}
|
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
|
2012-03-20 13:29:49 +00:00
|
|
|
CollectGarbage(NEW_SPACE, "idle notification");
|
2009-09-09 11:21:54 +00:00
|
|
|
new_space_.Shrink();
|
2011-03-18 20:35:07 +00:00
|
|
|
last_idle_notification_gc_count_ = gc_count_;
|
|
|
|
} else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
|
2009-11-30 07:57:32 +00:00
|
|
|
// Before doing the mark-sweep collections we clear the
|
|
|
|
// compilation cache to avoid hanging on to source code and
|
|
|
|
// generated code for cached functions.
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->compilation_cache()->Clear();
|
2009-11-30 07:57:32 +00:00
|
|
|
|
2012-02-03 14:16:40 +00:00
|
|
|
CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
|
2009-09-09 11:21:54 +00:00
|
|
|
new_space_.Shrink();
|
2011-03-18 20:35:07 +00:00
|
|
|
last_idle_notification_gc_count_ = gc_count_;
|
2009-09-09 11:21:54 +00:00
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
} else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
|
2012-02-03 14:16:40 +00:00
|
|
|
CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
|
2009-09-09 11:21:54 +00:00
|
|
|
new_space_.Shrink();
|
2011-03-18 20:35:07 +00:00
|
|
|
last_idle_notification_gc_count_ = gc_count_;
|
|
|
|
number_idle_notifications_ = 0;
|
2009-09-09 11:21:54 +00:00
|
|
|
finished = true;
|
2011-03-18 20:35:07 +00:00
|
|
|
} else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
|
2010-12-13 12:14:30 +00:00
|
|
|
// If we have received more than kIdlesBeforeMarkCompact idle
|
|
|
|
// notifications we do not perform any cleanup because we don't
|
|
|
|
// expect to gain much by doing so.
|
|
|
|
finished = true;
|
2009-08-26 08:13:27 +00:00
|
|
|
}
|
|
|
|
|
2011-03-18 20:35:07 +00:00
|
|
|
if (uncommit) UncommitFromSpace();
|
2011-09-28 12:55:34 +00:00
|
|
|
|
2009-08-26 08:13:27 +00:00
|
|
|
return finished;
|
|
|
|
}
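// --- Illustrative sketch, not part of the original heap.cc ----------------
// IdleGlobalGC escalates the work done per idle notification: the fourth
// idle triggers a scavenge, the seventh a mark-sweep, the eighth a final
// mark-compact, and anything beyond that does no further work. As a simple
// decision table ("Sketch" names are hypothetical):
static inline const char* SketchIdleAction(int idle_count) {
  if (idle_count == 4) return "scavenge";
  if (idle_count == 7) return "mark-sweep";
  if (idle_count == 8) return "mark-compact, round finished";
  if (idle_count > 8) return "nothing, round finished";
  return "nothing";
}
// ---------------------------------------------------------------------------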
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
|
|
|
|
void Heap::Print() {
|
2012-01-13 13:09:52 +00:00
|
|
|
if (!HasBeenSetUp()) return;
|
2013-05-21 09:25:57 +00:00
|
|
|
isolate()->PrintStack(stdout);
|
2013-02-11 13:02:20 +00:00
|
|
|
AllSpaces spaces(this);
|
|
|
|
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
|
2010-01-25 22:53:18 +00:00
|
|
|
space->Print();
|
2013-02-11 13:02:20 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::ReportCodeStatistics(const char* title) {
|
|
|
|
PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
|
|
|
|
PagedSpace::ResetCodeStatistics();
|
|
|
|
// We do not look for code in new space, map space, or old space. If code
|
|
|
|
// somehow ends up in those spaces, we would miss it here.
|
|
|
|
code_space_->CollectCodeStatistics();
|
|
|
|
lo_space_->CollectCodeStatistics();
|
|
|
|
PagedSpace::ReportCodeStatistics();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// This function expects that NewSpace's allocated objects histogram is
|
|
|
|
// populated (via a call to CollectStatistics or else as a side effect of a
|
|
|
|
// just-completed scavenge collection).
|
|
|
|
void Heap::ReportHeapStatistics(const char* title) {
|
|
|
|
USE(title);
|
|
|
|
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
|
|
|
|
title, gc_count_);
|
2013-05-23 15:11:43 +00:00
|
|
|
PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
|
|
|
|
old_generation_allocation_limit_);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
PrintF("\n");
|
2013-02-25 14:46:09 +00:00
|
|
|
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->global_handles()->PrintStats();
|
2008-07-03 15:10:15 +00:00
|
|
|
PrintF("\n");
|
|
|
|
|
|
|
|
PrintF("Heap statistics : ");
|
2011-03-18 20:35:07 +00:00
|
|
|
isolate_->memory_allocator()->ReportStatistics();
|
2008-07-03 15:10:15 +00:00
|
|
|
PrintF("To space : ");
|
2008-10-17 09:13:27 +00:00
|
|
|
new_space_.ReportStatistics();
|
2008-09-05 12:34:09 +00:00
|
|
|
PrintF("Old pointer space : ");
|
|
|
|
old_pointer_space_->ReportStatistics();
|
|
|
|
PrintF("Old data space : ");
|
|
|
|
old_data_space_->ReportStatistics();
|
2008-07-03 15:10:15 +00:00
|
|
|
PrintF("Code space : ");
|
|
|
|
code_space_->ReportStatistics();
|
|
|
|
PrintF("Map space : ");
|
|
|
|
map_space_->ReportStatistics();
|
2009-07-09 11:13:08 +00:00
|
|
|
PrintF("Cell space : ");
|
|
|
|
cell_space_->ReportStatistics();
|
2013-06-14 16:06:12 +00:00
|
|
|
PrintF("PropertyCell space : ");
|
2013-06-12 15:03:44 +00:00
|
|
|
property_cell_space_->ReportStatistics();
|
2008-07-03 15:10:15 +00:00
|
|
|
PrintF("Large object space : ");
|
|
|
|
lo_space_->ReportStatistics();
|
|
|
|
PrintF(">>>>>> ========================================= >>>>>>\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif // DEBUG
|
|
|
|
|
|
|
|
bool Heap::Contains(HeapObject* value) {
|
|
|
|
return Contains(value->address());
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool Heap::Contains(Address addr) {
|
|
|
|
if (OS::IsOutsideAllocatedSpace(addr)) return false;
|
2012-01-13 13:09:52 +00:00
|
|
|
return HasBeenSetUp() &&
|
2008-10-17 09:13:27 +00:00
|
|
|
(new_space_.ToSpaceContains(addr) ||
|
2008-09-05 12:34:09 +00:00
|
|
|
old_pointer_space_->Contains(addr) ||
|
|
|
|
old_data_space_->Contains(addr) ||
|
2008-07-03 15:10:15 +00:00
|
|
|
code_space_->Contains(addr) ||
|
|
|
|
map_space_->Contains(addr) ||
|
2009-07-09 11:13:08 +00:00
|
|
|
cell_space_->Contains(addr) ||
|
2013-06-12 15:03:44 +00:00
|
|
|
property_cell_space_->Contains(addr) ||
|
2008-07-03 15:10:15 +00:00
|
|
|
lo_space_->SlowContains(addr));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
|
|
|
|
return InSpace(value->address(), space);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool Heap::InSpace(Address addr, AllocationSpace space) {
|
|
|
|
if (OS::IsOutsideAllocatedSpace(addr)) return false;
|
2012-01-13 13:09:52 +00:00
|
|
|
if (!HasBeenSetUp()) return false;
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
switch (space) {
|
|
|
|
case NEW_SPACE:
|
2008-10-17 09:13:27 +00:00
|
|
|
return new_space_.ToSpaceContains(addr);
|
2008-09-05 12:34:09 +00:00
|
|
|
case OLD_POINTER_SPACE:
|
|
|
|
return old_pointer_space_->Contains(addr);
|
|
|
|
case OLD_DATA_SPACE:
|
|
|
|
return old_data_space_->Contains(addr);
|
2008-07-03 15:10:15 +00:00
|
|
|
case CODE_SPACE:
|
|
|
|
return code_space_->Contains(addr);
|
|
|
|
case MAP_SPACE:
|
|
|
|
return map_space_->Contains(addr);
|
2009-07-09 11:13:08 +00:00
|
|
|
case CELL_SPACE:
|
|
|
|
return cell_space_->Contains(addr);
|
2013-06-12 15:03:44 +00:00
|
|
|
case PROPERTY_CELL_SPACE:
|
|
|
|
return property_cell_space_->Contains(addr);
|
2008-07-03 15:10:15 +00:00
|
|
|
case LO_SPACE:
|
|
|
|
return lo_space_->SlowContains(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-12 11:41:14 +00:00
|
|
|
#ifdef VERIFY_HEAP
|
2008-07-03 15:10:15 +00:00
|
|
|
void Heap::Verify() {
|
2012-10-12 11:41:14 +00:00
|
|
|
CHECK(HasBeenSetUp());
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
store_buffer()->Verify();
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
VerifyPointersVisitor visitor;
|
2009-11-05 15:12:36 +00:00
|
|
|
IterateRoots(&visitor, VISIT_ONLY_STRONG);
|
2008-07-03 15:10:15 +00:00
|
|
|
|
2009-07-09 14:34:08 +00:00
|
|
|
new_space_.Verify();
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
old_pointer_space_->Verify(&visitor);
|
|
|
|
map_space_->Verify(&visitor);
|
2010-05-27 12:30:45 +00:00
|
|
|
|
|
|
|
VerifyPointersVisitor no_dirty_regions_visitor;
|
|
|
|
old_data_space_->Verify(&no_dirty_regions_visitor);
|
|
|
|
code_space_->Verify(&no_dirty_regions_visitor);
|
|
|
|
cell_space_->Verify(&no_dirty_regions_visitor);
|
2013-06-12 15:03:44 +00:00
|
|
|
property_cell_space_->Verify(&no_dirty_regions_visitor);
|
2009-07-09 14:34:08 +00:00
|
|
|
|
|
|
|
lo_space_->Verify();
|
2012-02-20 08:42:18 +00:00
|
|
|
}
|
2012-10-12 11:41:14 +00:00
|
|
|
#endif
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
|
|
|
|
Object* result = NULL;
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* new_table;
|
|
|
|
{ MaybeObject* maybe_new_table =
|
2013-02-28 17:03:34 +00:00
|
|
|
string_table()->LookupUtf8String(string, &result);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
|
|
|
|
}
|
2013-02-28 17:03:34 +00:00
|
|
|
// Can't use set_string_table because StringTable::cast knows that
|
|
|
|
// StringTable is a singleton and checks for identity.
|
|
|
|
roots_[kStringTableRootIndex] = new_table;
|
|
|
|
ASSERT(result != NULL);
|
|
|
|
return result;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
|
|
|
|
Object* result = NULL;
|
2010-12-22 20:14:19 +00:00
|
|
|
Object* new_table;
|
|
|
|
{ MaybeObject* maybe_new_table =
|
2013-02-28 17:03:34 +00:00
|
|
|
string_table()->LookupOneByteString(string, &result);
|
2010-12-22 20:14:19 +00:00
|
|
|
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
|
|
|
|
}
|
2013-02-28 17:03:34 +00:00
|
|
|
// Can't use set_string_table because StringTable::cast knows that
|
|
|
|
// StringTable is a singleton and checks for identity.
|
|
|
|
roots_[kStringTableRootIndex] = new_table;
|
|
|
|
ASSERT(result != NULL);
|
|
|
|
return result;
|
2010-12-22 20:14:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
|
2011-05-24 12:16:23 +00:00
|
|
|
int from,
|
|
|
|
int length) {
|
2013-02-28 17:03:34 +00:00
|
|
|
Object* result = NULL;
|
2011-05-24 12:16:23 +00:00
|
|
|
Object* new_table;
|
|
|
|
{ MaybeObject* maybe_new_table =
|
2013-02-28 17:03:34 +00:00
|
|
|
string_table()->LookupSubStringOneByteString(string,
|
2011-05-24 12:16:23 +00:00
|
|
|
from,
|
|
|
|
length,
|
2013-02-28 17:03:34 +00:00
|
|
|
&result);
|
2011-05-24 12:16:23 +00:00
|
|
|
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
|
|
|
|
}
|
2013-02-28 17:03:34 +00:00
|
|
|
// Can't use set_string_table because StringTable::cast knows that
|
|
|
|
// StringTable is a singleton and checks for identity.
|
|
|
|
roots_[kStringTableRootIndex] = new_table;
|
|
|
|
ASSERT(result != NULL);
|
|
|
|
return result;
|
2011-05-24 12:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
|
|
|
|
Object* result = NULL;
|
2010-12-22 20:14:19 +00:00
|
|
|
Object* new_table;
|
|
|
|
{ MaybeObject* maybe_new_table =
|
2013-02-28 17:03:34 +00:00
|
|
|
string_table()->LookupTwoByteString(string, &result);
|
2010-12-22 20:14:19 +00:00
|
|
|
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
|
|
|
|
}
|
2013-02-28 17:03:34 +00:00
|
|
|
// Can't use set_string_table because StringTable::cast knows that
|
|
|
|
// StringTable is a singleton and checks for identity.
|
|
|
|
roots_[kStringTableRootIndex] = new_table;
|
|
|
|
ASSERT(result != NULL);
|
|
|
|
return result;
|
2010-12-22 20:14:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
MaybeObject* Heap::InternalizeString(String* string) {
|
|
|
|
if (string->IsInternalizedString()) return string;
|
|
|
|
Object* result = NULL;
|
2010-10-25 15:22:03 +00:00
|
|
|
Object* new_table;
|
|
|
|
{ MaybeObject* maybe_new_table =
|
2013-02-28 17:03:34 +00:00
|
|
|
string_table()->LookupString(string, &result);
|
2010-10-25 15:22:03 +00:00
|
|
|
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
|
|
|
|
}
|
2013-02-28 17:03:34 +00:00
|
|
|
// Can't use set_string_table because StringTable::cast knows that
|
|
|
|
// StringTable is a singleton and checks for identity.
|
|
|
|
roots_[kStringTableRootIndex] = new_table;
|
|
|
|
ASSERT(result != NULL);
|
|
|
|
return result;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-28 17:03:34 +00:00
|
|
|
bool Heap::InternalizeStringIfExists(String* string, String** result) {
|
|
|
|
if (string->IsInternalizedString()) {
|
|
|
|
*result = string;
|
2008-10-07 10:10:03 +00:00
|
|
|
return true;
|
|
|
|
}
|
2013-02-28 17:03:34 +00:00
|
|
|
return string_table()->LookupStringIfExists(string, result);
|
2008-10-07 10:10:03 +00:00
|
|
|
}
|
|
|
|
|
2012-10-26 09:44:34 +00:00
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
void Heap::ZapFromSpace() {
|
2011-09-19 18:36:47 +00:00
|
|
|
NewSpacePageIterator it(new_space_.FromSpaceStart(),
|
|
|
|
new_space_.FromSpaceEnd());
|
|
|
|
while (it.has_next()) {
|
|
|
|
NewSpacePage* page = it.next();
|
2012-02-23 12:11:24 +00:00
|
|
|
for (Address cursor = page->area_start(), limit = page->area_end();
|
2011-09-19 18:36:47 +00:00
|
|
|
cursor < limit;
|
|
|
|
cursor += kPointerSize) {
|
|
|
|
Memory::Address_at(cursor) = kFromSpaceZapValue;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
}
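// --- Illustrative sketch, not part of the original heap.cc ----------------
// Zapping overwrites every pointer-sized word of from-space with a sentinel
// value so that stale references are easy to recognize in a debugger. The
// core of the loop above is a word-sized fill over an address range
// ("Sketch" names are hypothetical):
static inline void SketchZapRange(uintptr_t* start, uintptr_t* end,
                                  uintptr_t zap_value) {
  for (uintptr_t* cursor = start; cursor < end; cursor++) {
    *cursor = zap_value;  // a recognizable bit pattern, not a valid pointer
  }
}
// ---------------------------------------------------------------------------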
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void Heap::IterateAndMarkPointersToFromSpace(Address start,
|
|
|
|
Address end,
|
|
|
|
ObjectSlotCallback callback) {
|
2010-05-27 12:30:45 +00:00
|
|
|
Address slot_address = start;
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
// We are not collecting slots on new space objects during mutation;
|
|
|
|
// thus we have to scan for pointers to evacuation candidates when we
|
|
|
|
// promote objects. But we should not record any slots in non-black
|
|
|
|
// objects. Grey objects' slots would be rescanned.
|
|
|
|
// A white object might not survive until the end of collection;
|
|
|
|
// it would be a violation of the invariant to record its slots.
|
|
|
|
bool record_slots = false;
|
|
|
|
if (incremental_marking()->IsCompacting()) {
|
|
|
|
MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
|
|
|
|
record_slots = Marking::IsBlack(mark_bit);
|
|
|
|
}
|
2010-05-27 12:30:45 +00:00
|
|
|
|
|
|
|
while (slot_address < end) {
|
|
|
|
Object** slot = reinterpret_cast<Object**>(slot_address);
|
2011-09-19 18:36:47 +00:00
|
|
|
Object* object = *slot;
|
|
|
|
// If the store buffer becomes overfull we mark pages as being exempt from
|
|
|
|
// the store buffer. These pages are scanned to find pointers that point
|
|
|
|
// to the new space. In that case we may hit newly promoted objects and
|
|
|
|
// fix the pointers before the promotion queue gets to them. Thus the 'if'.
|
|
|
|
if (object->IsHeapObject()) {
|
|
|
|
if (Heap::InFromSpace(object)) {
|
|
|
|
callback(reinterpret_cast<HeapObject**>(slot),
|
|
|
|
HeapObject::cast(object));
|
|
|
|
Object* new_object = *slot;
|
|
|
|
if (InNewSpace(new_object)) {
|
2011-10-13 11:50:00 +00:00
|
|
|
SLOW_ASSERT(Heap::InToSpace(new_object));
|
|
|
|
SLOW_ASSERT(new_object->IsHeapObject());
|
2011-09-19 18:36:47 +00:00
|
|
|
store_buffer_.EnterDirectlyIntoStoreBuffer(
|
|
|
|
reinterpret_cast<Address>(slot));
|
|
|
|
}
|
2011-10-13 11:50:00 +00:00
|
|
|
SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
|
2011-09-19 18:36:47 +00:00
|
|
|
} else if (record_slots &&
|
|
|
|
MarkCompactCollector::IsOnEvacuationCandidate(object)) {
|
|
|
|
mark_compact_collector()->RecordSlot(slot, slot, object);
|
2010-05-25 13:15:16 +00:00
|
|
|
}
|
2010-05-27 12:30:45 +00:00
|
|
|
}
|
|
|
|
slot_address += kPointerSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
#ifdef DEBUG
|
|
|
|
typedef bool (*CheckStoreBufferFilter)(Object** addr);
|
2010-05-27 12:30:45 +00:00
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
bool IsAMapPointerAddress(Object** addr) {
|
|
|
|
uintptr_t a = reinterpret_cast<uintptr_t>(addr);
|
|
|
|
int mod = a % Map::kSize;
|
|
|
|
return mod >= Map::kPointerFieldsBeginOffset &&
|
|
|
|
mod < Map::kPointerFieldsEndOffset;
|
2010-05-27 12:30:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
bool EverythingsAPointer(Object** addr) {
|
|
|
|
return true;
|
|
|
|
}
|
2010-05-27 12:30:45 +00:00
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
static void CheckStoreBuffer(Heap* heap,
|
|
|
|
Object** current,
|
|
|
|
Object** limit,
|
|
|
|
Object**** store_buffer_position,
|
|
|
|
Object*** store_buffer_top,
|
|
|
|
CheckStoreBufferFilter filter,
|
|
|
|
Address special_garbage_start,
|
|
|
|
Address special_garbage_end) {
|
|
|
|
Map* free_space_map = heap->free_space_map();
|
|
|
|
for ( ; current < limit; current++) {
|
|
|
|
Object* o = *current;
|
|
|
|
Address current_address = reinterpret_cast<Address>(current);
|
|
|
|
// Skip free space.
|
|
|
|
if (o == free_space_map) {
|
|
|
|
Address current_address = reinterpret_cast<Address>(current);
|
|
|
|
FreeSpace* free_space =
|
|
|
|
FreeSpace::cast(HeapObject::FromAddress(current_address));
|
|
|
|
int skip = free_space->Size();
|
|
|
|
ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
|
|
|
|
ASSERT(skip > 0);
|
|
|
|
current_address += skip - kPointerSize;
|
|
|
|
current = reinterpret_cast<Object**>(current_address);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// Skip the current linear allocation space between top and limit which is
|
|
|
|
// unmarked with the free space map, but can contain junk.
|
|
|
|
if (current_address == special_garbage_start &&
|
|
|
|
special_garbage_end != special_garbage_start) {
|
|
|
|
current_address = special_garbage_end - kPointerSize;
|
|
|
|
current = reinterpret_cast<Object**>(current_address);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (!(*filter)(current)) continue;
|
|
|
|
ASSERT(current_address < special_garbage_start ||
|
|
|
|
current_address >= special_garbage_end);
|
|
|
|
ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
|
|
|
|
// We have to check that the pointer does not point into new space
|
|
|
|
// without trying to cast it to a heap object since the hash field of
|
|
|
|
// a string can contain values like 1 and 3 which are tagged null
|
|
|
|
// pointers.
|
|
|
|
if (!heap->InNewSpace(o)) continue;
|
|
|
|
while (**store_buffer_position < current &&
|
|
|
|
*store_buffer_position < store_buffer_top) {
|
|
|
|
(*store_buffer_position)++;
|
|
|
|
}
|
|
|
|
if (**store_buffer_position != current ||
|
|
|
|
*store_buffer_position == store_buffer_top) {
|
|
|
|
Object** obj_start = current;
|
|
|
|
while (!(*obj_start)->IsMap()) obj_start--;
|
|
|
|
UNREACHABLE();
|
2010-05-27 12:30:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
// Check that the store buffer contains all intergenerational pointers by
|
|
|
|
// scanning a page and ensuring that all pointers to young space are in the
|
|
|
|
// store buffer.
|
|
|
|
void Heap::OldPointerSpaceCheckStoreBuffer() {
|
|
|
|
OldSpace* space = old_pointer_space();
|
|
|
|
PageIterator pages(space);
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
store_buffer()->SortUniq();
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
while (pages.has_next()) {
|
|
|
|
Page* page = pages.next();
|
2012-02-23 12:11:24 +00:00
|
|
|
Object** current = reinterpret_cast<Object**>(page->area_start());
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2012-02-23 12:11:24 +00:00
|
|
|
Address end = page->area_end();
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
Object*** store_buffer_position = store_buffer()->Start();
|
|
|
|
Object*** store_buffer_top = store_buffer()->Top();
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
Object** limit = reinterpret_cast<Object**>(end);
|
|
|
|
CheckStoreBuffer(this,
|
|
|
|
current,
|
|
|
|
limit,
|
|
|
|
&store_buffer_position,
|
|
|
|
store_buffer_top,
|
|
|
|
&EverythingsAPointer,
|
|
|
|
space->top(),
|
|
|
|
space->limit());
|
2010-05-27 12:30:45 +00:00
|
|
|
}
|
|
|
|
}
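// --- Illustrative sketch, not part of the original heap.cc ----------------
// After SortUniq the store buffer behaves like a sorted, de-duplicated list
// of recorded slot addresses, so the check above amounts to asking "is this
// old-to-new slot present in the sorted list?" while walking both sequences
// forward ("Sketch" names are hypothetical):
static inline bool SketchSlotRecorded(void** const* sorted_slots, int count,
                                      void** slot) {
  for (int i = 0; i < count && sorted_slots[i] <= slot; i++) {
    if (sorted_slots[i] == slot) return true;  // slot is in the store buffer
  }
  return false;  // a missing entry would violate the invariant
}
// ---------------------------------------------------------------------------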
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void Heap::MapSpaceCheckStoreBuffer() {
|
|
|
|
MapSpace* space = map_space();
|
|
|
|
PageIterator pages(space);
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
store_buffer()->SortUniq();
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
while (pages.has_next()) {
|
|
|
|
Page* page = pages.next();
|
2012-02-23 12:11:24 +00:00
|
|
|
Object** current = reinterpret_cast<Object**>(page->area_start());
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2012-02-23 12:11:24 +00:00
|
|
|
Address end = page->area_end();
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
Object*** store_buffer_position = store_buffer()->Start();
|
|
|
|
Object*** store_buffer_top = store_buffer()->Top();
|
2010-05-27 12:30:45 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
Object** limit = reinterpret_cast<Object**>(end);
|
|
|
|
CheckStoreBuffer(this,
|
|
|
|
current,
|
|
|
|
limit,
|
|
|
|
&store_buffer_position,
|
|
|
|
store_buffer_top,
|
|
|
|
&IsAMapPointerAddress,
|
|
|
|
space->top(),
|
|
|
|
space->limit());
|
2010-05-27 12:30:45 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
void Heap::LargeObjectSpaceCheckStoreBuffer() {
|
|
|
|
LargeObjectIterator it(lo_space());
|
|
|
|
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
|
|
|
|
// We only have code, sequential strings, or fixed arrays in large
|
|
|
|
// object space, and only fixed arrays can possibly contain pointers to
|
|
|
|
// the young generation.
|
|
|
|
if (object->IsFixedArray()) {
|
|
|
|
Object*** store_buffer_position = store_buffer()->Start();
|
|
|
|
Object*** store_buffer_top = store_buffer()->Top();
|
|
|
|
Object** current = reinterpret_cast<Object**>(object->address());
|
|
|
|
Object** limit =
|
|
|
|
reinterpret_cast<Object**>(object->address() + object->Size());
|
|
|
|
CheckStoreBuffer(this,
|
|
|
|
current,
|
|
|
|
limit,
|
|
|
|
&store_buffer_position,
|
|
|
|
store_buffer_top,
|
|
|
|
&EverythingsAPointer,
|
|
|
|
NULL,
|
|
|
|
NULL);
|
2009-05-25 16:33:00 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
}
|
2011-09-19 18:36:47 +00:00
|
|
|
#endif
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
|
2009-11-05 15:12:36 +00:00
|
|
|
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
|
|
|
|
IterateStrongRoots(v, mode);
|
2010-01-27 08:25:48 +00:00
|
|
|
IterateWeakRoots(v, mode);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
|
2013-02-28 17:03:34 +00:00
|
|
|
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
|
|
|
|
v->Synchronize(VisitorSynchronization::kStringTable);
|
2011-05-17 12:18:19 +00:00
|
|
|
if (mode != VISIT_ALL_IN_SCAVENGE &&
|
|
|
|
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
|
2009-12-09 14:32:45 +00:00
|
|
|
// Scavenge collections have special processing for this.
|
2011-03-18 20:35:07 +00:00
|
|
|
external_string_table_.Iterate(v);
|
2013-01-14 13:19:27 +00:00
|
|
|
error_object_list_.Iterate(v);
|
2009-12-09 14:32:45 +00:00
|
|
|
}
|
2011-12-06 17:41:47 +00:00
|
|
|
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-11-05 15:12:36 +00:00
|
|
|
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize(VisitorSynchronization::kStrongRootList);

  v->VisitPointer(BitCast<Object**>(&hidden_string_));
  v->Synchronize(VisitorSynchronization::kInternalizedString);

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kBootstrapper);
  isolate_->Iterate(v);
  v->Synchronize(VisitorSynchronization::kTop);
  Relocatable::Iterate(v);
  v->Synchronize(VisitorSynchronization::kRelocatable);

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize(VisitorSynchronization::kDebug);
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kCompilationCache);

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  isolate_->IterateDeferredHandles(v);
  v->Synchronize(VisitorSynchronization::kHandleScope);

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize(VisitorSynchronization::kBuiltins);

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize(VisitorSynchronization::kGlobalHandles);

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kThreadManager);

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to.  During
  // serialization this does nothing, since the partial snapshot cache is
  // empty.  However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot.  However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}

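// NOTE (illustrative sketch, not part of the original file): the root
// iteration above hands every strong root slot to an ObjectVisitor.  A
// minimal visitor that merely counts slots looks like the hypothetical class
// below; it follows the same pattern as PrintHandleVisitor later in this
// file.
//
//   class CountingRootVisitor : public ObjectVisitor {
//    public:
//     CountingRootVisitor() : count_(0) {}
//     void VisitPointers(Object** start, Object** end) {
//       count_ += static_cast<int>(end - start);
//     }
//     int count() const { return count_; }
//    private:
//     int count_;
//   };
//
//   // Usage: heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
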
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         intptr_t max_old_gen_size,
                         intptr_t max_executable_size) {
  if (HasBeenSetUp()) return false;

  if (FLAG_stress_compaction) {
    // This will cause more frequent GCs when stressing.
    max_semispace_size_ = Page::kPageSize;
  }

  if (max_semispace_size > 0) {
    if (max_semispace_size < Page::kPageSize) {
      max_semispace_size = Page::kPageSize;
      if (FLAG_trace_gc) {
        PrintPID("Max semispace size cannot be less than %dkbytes\n",
                 Page::kPageSize >> 10);
      }
    }
    max_semispace_size_ = max_semispace_size;
  }

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space.  We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
      if (FLAG_trace_gc) {
        PrintPID("Max semispace size cannot be more than %dkbytes\n",
                 reserved_semispace_size_ >> 10);
      }
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);

  // The external allocation limit should be below 256 MB on all architectures
  // to avoid unnecessary low memory notifications, as that is the threshold
  // for some embedders.
  external_allocation_limit_ = 12 * max_semispace_size_;
  ASSERT(external_allocation_limit_ <= 256 * MB);

  // The old generation is paged and needs at least one page for each space.
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
                                                       Page::kPageSize),
                                 RoundUp(max_old_generation_size_,
                                         Page::kPageSize));

  configured_ = true;
  return true;
}

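// NOTE (illustrative sketch, not part of the original file): the semispace
// size is rounded to a power of two above so that "is this address in new
// space?" can be answered with a single mask-and-compare instead of two range
// checks.  The helper name below is hypothetical; it shows the standard
// bit-smearing round-up and the resulting containment test.
//
//   static inline uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
//     x--;
//     x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
//     x |= x >> 8;  x |= x >> 16;
//     return x + 1;
//   }
//
//   // For a base-aligned region whose size is a power of two:
//   //   bool contains = ((addr & ~(size - 1)) == base);
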
bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
}

void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->SizeOfObjects();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->SizeOfObjects();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->SizeOfObjects();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
      isolate()->memory_allocator()->Available();
  if (take_snapshot) {
    HeapIterator iterator(this);
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}

intptr_t Heap::PromotedSpaceSizeOfObjects() {
  return old_pointer_space_->SizeOfObjects()
      + old_data_space_->SizeOfObjects()
      + code_space_->SizeOfObjects()
      + map_space_->SizeOfObjects()
      + cell_space_->SizeOfObjects()
      + property_cell_space_->SizeOfObjects()
      + lo_space_->SizeOfObjects();
}

intptr_t Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}

V8_DECLARE_ONCE(initialize_gc_once);

static void InitializeGCOnce() {
  InitializeScavengingVisitorsTables();
  NewSpaceScavenger::Initialize();
  MarkCompactCollector::Initialize();
}

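// NOTE (illustrative sketch, not part of the original file): CallOnce with
// the flag declared above guarantees that the scavenging visitor tables and
// collector statics are initialized exactly once, however many heaps are set
// up.  The same shape expressed with the standard library (a swapped-in
// facility, not what V8 uses) would be:
//
//   #include <mutex>
//   static std::once_flag init_gc_flag;
//   void EnsureGCInitialized() {
//     std::call_once(init_gc_flag, [] { /* one-time setup */ });
//   }
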
bool Heap::SetUp() {
#ifdef DEBUG
  allocation_timeout_ = FLAG_gc_interval;
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g. through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  CallOnce(&initialize_gc_once, &InitializeGCOnce);

  MarkMapPointersAsEncoded(false);

  // Set up memory allocator.
  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
      return false;

  // Set up new space.
  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->SetUp()) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->SetUp()) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->SetUp(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->SetUp()) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->SetUp()) return false;

  // Initialize simple cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->SetUp()) return false;

  // Initialize global property cell space.
  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
                                               PROPERTY_CELL_SPACE);
  if (property_cell_space_ == NULL) return false;
  if (!property_cell_space_->SetUp()) return false;

  // The large object code space may contain code or data.  We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->SetUp()) return false;

  // Set up the seed that is used to randomize the string hash function.
  ASSERT(hash_seed() == 0);
  if (FLAG_randomize_hashes) {
    if (FLAG_hash_seed == 0) {
      set_hash_seed(
          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
    } else {
      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
    }
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  store_buffer()->SetUp();

  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
#ifdef DEBUG
  relocation_mutex_locked_by_optimizer_thread_ = false;
#endif  // DEBUG

  return true;
}

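// NOTE (illustrative sketch, not part of the original file): the comment at
// the top of Heap::SetUp() spells out the error-handling contract: every
// failure path simply returns false and may leave partially created spaces
// behind, so the caller is expected to release them.  A hypothetical caller
// honoring that contract:
//
//   // bool SetUpHeapOrBail(Heap* heap) {
//   //   if (!heap->SetUp()) {
//   //     heap->TearDown();  // release whatever was allocated before failing
//   //     return false;
//   //   }
//   //   return true;
//   // }
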
bool Heap::CreateHeapObjects() {
  // Create initial maps.
  if (!CreateInitialMaps()) return false;
  if (!CreateApiObjects()) return false;

  // Create initial objects
  if (!CreateInitialObjects()) return false;

  native_contexts_list_ = undefined_value();
  array_buffers_list_ = undefined_value();
  allocation_sites_list_ = undefined_value();
  return true;
}

void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis.  We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}

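// NOTE (illustrative sketch, not part of the original file): the masking in
// SetStackLimits() stores a raw address disguised as a small integer so the
// GC will never try to follow it as a pointer.  In isolation, the arithmetic
// is simply:
//
//   // uintptr_t limit     = ...;  // raw stack limit address
//   // uintptr_t disguised = (limit & ~kSmiTagMask) | kSmiTag;
//   // (disguised & kSmiTagMask) == kSmiTag   // passes the smi check
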
void Heap::TearDown() {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("total_marking_time=%.1f ", marking_time());
    PrintF("total_sweeping_time=%.1f ", sweeping_time());
    PrintF("\n\n");
  }

  TearDownArrayBuffers();

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  error_object_list_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (property_cell_space_ != NULL) {
    property_cell_space_->TearDown();
    delete property_cell_space_;
    property_cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  store_buffer()->TearDown();
  incremental_marking()->TearDown();

  isolate_->memory_allocator()->TearDown();

  delete relocation_mutex_;
}

void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

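// NOTE (illustrative sketch, not part of the original file): how the
// prologue/epilogue lists above are intended to be used.  A callback is
// registered once for the GC types it cares about and must later be removed
// with the exact same function pointer; the callback name below is
// hypothetical and the signature is assumed to match the GCPrologueCallback
// typedef used by this file.
//
//   // static void MyPrologue(GCType type, GCCallbackFlags flags) { ... }
//   // heap->AddGCPrologueCallback(MyPrologue, kGCTypeMarkSweepCompact);
//   // ...
//   // heap->RemoveGCPrologueCallback(MyPrologue);  // UNREACHABLE() if absent
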
#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF(" handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};


void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif

Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}

SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ =
          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case PROPERTY_CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
                                         size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator;
  ASSERT(iterator_ != NULL);
  return iterator_;
}

class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    Heap* heap = Isolate::Current()->heap();
    MarkingVisitor visitor;
    heap->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  DisallowHeapAllocation no_allocation_;
};

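// NOTE (illustrative sketch, not part of the original file): the filter above
// computes reachability with the classic explicit-worklist closure: mark a
// node when first seen, push it, and keep popping until the stack is empty.
// The same shape on a plain adjacency list, in standard C++ for reference:
//
//   static std::vector<bool> ReachableSketch(
//       const std::vector<std::vector<int> >& edges, int root) {
//     std::vector<bool> marked(edges.size(), false);
//     std::vector<int> worklist(1, root);
//     marked[root] = true;
//     while (!worklist.empty()) {
//       int n = worklist.back();
//       worklist.pop_back();
//       for (size_t i = 0; i < edges[n].size(); ++i) {
//         int m = edges[n][i];
//         if (!marked[m]) { marked[m] = true; worklist.push_back(m); }
//       }
//     }
//     return marked;
//   }
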
HeapIterator::HeapIterator(Heap* heap)
    : heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go though the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}

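// NOTE (illustrative sketch, not part of the original file): typical use of
// the iterator defined above, in the same style as the loop in
// Heap::RecordStats earlier in this file.  Passing kFilterUnreachable
// additionally skips objects that the marking pass above did not reach.
//
//   // HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   // for (HeapObject* obj = iterator.next();
//   //      obj != NULL;
//   //      obj = iterator.next()) {
//   //   // inspect obj
//   // }
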
#ifdef DEBUG

Object* const PathTracer::kAnyGlobalObject = NULL;

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  Reset();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_native_context = SafeIsNativeContext(obj);

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}

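// NOTE (illustrative sketch, not part of the original file):
// MarkRecursively/UnmarkRecursively above "mark" an object by adding kMarkTag
// to its map pointer and later subtracting it again, so no side table is
// needed while tracing.  The reversible trick in isolation:
//
//   // Address map_addr = address_of_objects_map;
//   // Address marked   = map_addr + kMarkTag;  // no longer a valid pointer,
//   //                                          // so IsHeapObject() fails and
//   //                                          // the object reads as marked
//   // Address restored = marked - kMarkTag;    // == map_addr again
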
// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif

static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}

GCTracer::GCTracer(Heap* heap,
                   const char* gc_reason,
                   const char* collector_reason)
    : start_time_(0.0),
      start_object_size_(0),
      start_memory_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      heap_(heap),
      gc_reason_(gc_reason),
      collector_reason_(collector_reason) {
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_object_size_ = heap_->SizeOfObjects();
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }

  steps_count_ = heap_->incremental_marking()->steps_count();
  steps_took_ = heap_->incremental_marking()->steps_took();
  longest_step_ = heap_->incremental_marking()->longest_step();
  steps_count_since_last_gc_ =
      heap_->incremental_marking()->steps_count_since_last_gc();
  steps_took_since_last_gc_ =
      heap_->incremental_marking()->steps_took_since_last_gc();
}

GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
               "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
           scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
    PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
    PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}

const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}

int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!HEAP->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!HEAP->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }
  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}

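// NOTE (illustrative sketch, not part of the original file): the bucket
// policy used by KeyedLookupCache::Update above, shown on plain arrays: try
// to reuse a free slot first, otherwise shift the bucket down one and install
// the new entry at position zero (a cheap most-recently-used scheme).  The
// helper name and the int payload are hypothetical.
//
//   static void InsertIntoBucketSketch(int* bucket, int entries_per_bucket,
//                                      int new_value, int free_marker) {
//     for (int i = 0; i < entries_per_bucket; i++) {
//       if (bucket[i] == free_marker) { bucket[i] = new_value; return; }
//     }
//     for (int i = entries_per_bucket - 1; i > 0; i--) {
//       bucket[i] = bucket[i - 1];
//     }
//     bucket[0] = new_value;
//   }
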
void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
  : type_(t),
    isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
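

// The 0xffffffff/0xffffffff pair written above is a NaN bit pattern the FPU
// never produces, so a raw bit comparison against a real input can never hit
// an uninitialized slot.  A stand-alone sketch of such a probe follows;
// ExampleSlot, the toy hash and ExampleLookup are placeholders (not V8's
// actual cache layout) and the block is guarded out of the build.
#if 0
struct ExampleSlot { uint32_t in[2]; double* output; };

static double* ExampleLookup(ExampleSlot* cache, int size,
                             uint32_t lo, uint32_t hi) {
  int index = static_cast<int>((lo ^ hi) % static_cast<uint32_t>(size));
  ExampleSlot& slot = cache[index];
  // An empty slot still holds the impossible NaN pattern, so this comparison
  // fails for it and the lookup correctly reports a miss.
  if (slot.in[0] == lo && slot.in[1] == hi) return slot.output;
  return NULL;  // cache miss
}
#endif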


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}
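

// CleanUp above uses the usual read-index/write-index compaction: survivors
// are copied towards the front and the list is then rewound to the number of
// entries actually kept.  A stand-alone sketch of that pattern on a plain
// array follows; ExampleCompact and the dead_marker convention are
// placeholders, not V8 API, and the block is guarded out of the build.
#if 0
static int ExampleCompact(int* items, int length, int dead_marker) {
  int last = 0;
  for (int i = 0; i < length; ++i) {
    if (items[i] == dead_marker) continue;  // drop dead entries
    items[last++] = items[i];               // keep survivors, packed at front
  }
  return last;  // caller rewinds/trims the list to this length
}
#endif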


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


// Update all references.
void ErrorObjectList::UpdateReferences() {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    MapWord first_word = object->map_word();
    if (first_word.IsForwardingAddress()) {
      list_[i] = first_word.ToForwardingAddress();
    }
  }
}


// Unforwarded objects in new space are dead and removed from the list.
void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
  if (list_.is_empty()) return;
  if (!nested_) {
    int write_index = 0;
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      if (first_word.IsForwardingAddress()) {
        list_[write_index++] = first_word.ToForwardingAddress();
      }
    }
    list_.Rewind(write_index);
  } else {
    // If a GC is triggered during DeferredFormatStackTrace, we do not move
    // objects in the list, just remove dead ones, so as not to confuse the
    // loop in DeferredFormatStackTrace.
    for (int i = 0; i < list_.length(); i++) {
      MapWord first_word = HeapObject::cast(list_[i])->map_word();
      list_[i] = first_word.IsForwardingAddress()
                     ? first_word.ToForwardingAddress()
                     : heap->the_hole_value();
    }
  }
}


void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
  // If formatting the stack trace causes a GC, this method will be
  // recursively called.  In that case, skip the recursive call, since
  // the loop modifies the list while iterating over it.
  if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
  nested_ = true;
  HandleScope scope(isolate);
  Handle<String> stack_key = isolate->factory()->stack_string();
  int write_index = 0;
  int budget = kBudgetPerGC;
  for (int i = 0; i < list_.length(); i++) {
    Object* object = list_[i];
    JSFunction* getter_fun;

    { DisallowHeapAllocation no_gc;
      // Skip possible holes in the list.
      if (object->IsTheHole()) continue;
      if (isolate->heap()->InNewSpace(object) || budget == 0) {
        list_[write_index++] = object;
        continue;
      }

      // Check whether the stack property is backed by the original getter.
      LookupResult lookup(isolate);
      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
      Object* callback = lookup.GetCallbackObject();
      if (!callback->IsAccessorPair()) continue;
      Object* getter_obj = AccessorPair::cast(callback)->getter();
      if (!getter_obj->IsJSFunction()) continue;
      getter_fun = JSFunction::cast(getter_obj);
      String* key = isolate->heap()->hidden_stack_trace_string();
      Object* value = getter_fun->GetHiddenProperty(key);
      if (key != value) continue;
    }

    budget--;
    HandleScope scope(isolate);
    bool has_exception = false;
#ifdef DEBUG
    Handle<Map> map(HeapObject::cast(object)->map(), isolate);
#endif
    Handle<Object> object_handle(object, isolate);
    Handle<Object> getter_handle(getter_fun, isolate);
    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
    ASSERT(*map == HeapObject::cast(*object_handle)->map());
    if (has_exception) {
      // Hit an exception (most likely a stack overflow).
      // Wrap up this pass and retry after another GC.
      isolate->clear_pending_exception();
      // We use the handle since calling the getter might have caused a GC.
      list_[write_index++] = *object_handle;
      budget = 0;
    }
  }
  list_.Rewind(write_index);
  list_.Trim();
  nested_ = false;
}
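

// DeferredFormatStackTrace above caps the work done per GC with a budget and
// keeps the unprocessed tail of the list for the next pass.  A stand-alone
// sketch of that budgeted deferred-work shape follows; ExampleProcessSome and
// kExampleBudget are placeholders, not V8 API, and the block is guarded out
// of the build.
#if 0
static const int kExampleBudget = 16;

static int ExampleProcessSome(int* items, int length, bool (*process)(int)) {
  int write_index = 0;
  int budget = kExampleBudget;
  for (int i = 0; i < length; i++) {
    if (budget == 0 || !process(items[i])) {
      items[write_index++] = items[i];  // keep for a later pass
      continue;
    }
    budget--;  // processed, so the item is dropped from the list
  }
  return write_index;  // new length after this pass
}
#endif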


void ErrorObjectList::RemoveUnmarked(Heap* heap) {
  for (int i = 0; i < list_.length(); i++) {
    HeapObject* object = HeapObject::cast(list_[i]);
    if (!Marking::MarkBitFrom(object).Get()) {
      list_[i] = heap->the_hole_value();
    }
  }
}


void ErrorObjectList::TearDown() {
  list_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}
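

// QueueMemoryChunkForFree above is a plain intrusive push onto a singly
// linked list: the chunk itself stores the "next" pointer, so queueing needs
// no allocation.  A stand-alone sketch follows; ExampleNode and ExamplePush
// are placeholders, not V8 API, and the block is guarded out of the build.
#if 0
struct ExampleNode { ExampleNode* next; };

static void ExamplePush(ExampleNode** head, ExampleNode* node) {
  node->next = *head;  // new node points at the old head
  *head = node;        // and becomes the new head
}
#endif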


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and the queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece.
      // If FromAnyPointerAddress encounters a slot that belongs to one of
      // these smaller pieces it will treat it as a slot on a normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}
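

// The inner loop above stamps a fake page header into every kPageSize-aligned
// slice of a large chunk so that address arithmetic on any interior pointer
// lands on a valid-looking header.  The slicing itself is plain address
// arithmetic; a stand-alone sketch follows (ExampleForEachSlice is a
// placeholder, not V8 API), guarded out of the build.
#if 0
static void ExampleForEachSlice(uintptr_t start, size_t size, size_t page_size,
                                void (*visit)(uintptr_t slice_start,
                                              uintptr_t slice_end)) {
  uintptr_t end = start + size;
  for (uintptr_t slice = start; slice < end; slice += page_size) {
    uintptr_t slice_end = slice + page_size;
    if (slice_end > end) slice_end = end;  // last slice may be short
    visit(slice, slice_end);
  }
}
#endif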


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
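

// Because a page address is Page::kPageSize-aligned, its low bits are zero,
// so XOR-ing in a constant smaller than the page size only sets those low
// bits: the tag is readable in a crash dump and the original address can be
// recovered by masking the low bits off again.  A worked sketch follows,
// assuming a 1 MB page size (the page size and addresses are illustrative
// assumptions); guarded out of the build.
#if 0
static void ExampleTagRoundTrip() {
  const uintptr_t kExamplePageSize = 1 << 20;  // assumption: 1 MB pages
  uintptr_t page = 0x08a00000u;                // a page-aligned address
  uintptr_t tagged = page ^ (0xc1ead & (kExamplePageSize - 1));
  // tagged == 0x08ac1ead: the "c1ead" marker is visible in the low bits.
  uintptr_t recovered = tagged & ~(kExamplePageSize - 1);
  // recovered == page again.
  (void) recovered;
}
#endif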


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


void Heap::CheckpointObjectStats() {
  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  counters->count_of_##name()->Increment(                 \
      static_cast<int>(object_counts_[name]));            \
  counters->count_of_##name()->Decrement(                 \
      static_cast<int>(object_counts_last_time_[name]));  \
  counters->size_of_##name()->Increment(                  \
      static_cast<int>(object_sizes_[name]));             \
  counters->size_of_##name()->Decrement(                  \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}
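

// CheckpointObjectStats above turns absolute per-GC tallies into counter
// deltas: each counter is incremented by the current tally and decremented by
// the tally recorded at the previous checkpoint, so the external counter ends
// up tracking the latest value without ever being reset.  A stand-alone
// sketch of that bookkeeping follows; ExampleCounter and ExampleCheckpoint
// are placeholders, not V8 API, and the block is guarded out of the build.
#if 0
struct ExampleCounter {
  int value;
  void Increment(int by) { value += by; }
  void Decrement(int by) { value -= by; }
};

static void ExampleCheckpoint(ExampleCounter* counter,
                              int current, int* last_time) {
  counter->Increment(current);     // add this cycle's tally
  counter->Decrement(*last_time);  // remove the previous checkpoint's tally
  *last_time = current;            // remember the tally for the next cycle
}
#endif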


Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
  if (FLAG_parallel_recompilation) {
    heap_->relocation_mutex_->Lock();
#ifdef DEBUG
    heap_->relocation_mutex_locked_by_optimizer_thread_ =
        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
#endif  // DEBUG
  }
}


} }  // namespace v8::internal