Preserve optimized code map during GCs weakly.

This change preserves the contents of optimized code maps during GCs but
treats the references in these caches weakly. It uses the code-flushing
infrastructure to maintain a list of all such caches.
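In short, the new policy is: after full-GC marking, an entry in a SharedFunctionInfo's optimized code map survives only if its code object was marked through some other, strong path; surviving entries are compacted to the front and the array is trimmed. The following is a minimal standalone C++ sketch of that policy using plain structs instead of V8 heap objects; it is not V8 code, and the name ProcessCacheWeakly is made up for illustration.

// Standalone sketch (not V8 code) of the cache policy this change introduces:
// keep only entries whose code object survived marking, compact the
// survivors to the front, and trim the tail of the backing store.
#include <cstdio>
#include <vector>

struct Entry {
  int context_id;    // stands in for the native context
  int code_id;       // stands in for the optimized Code object
  bool code_marked;  // would be the mark bit set by the GC marker
};

// Mirrors CodeFlusher::ProcessOptimizedCodeMaps() in spirit.
void ProcessCacheWeakly(std::vector<Entry>* cache) {
  size_t new_length = 0;
  for (const Entry& entry : *cache) {
    if (entry.code_marked) (*cache)[new_length++] = entry;
  }
  cache->resize(new_length);  // analogous to TrimOptimizedCodeMap()
}

int main() {
  std::vector<Entry> cache = {{1, 10, true}, {2, 20, false}, {3, 30, true}};
  ProcessCacheWeakly(&cache);
  for (const Entry& e : cache)
    std::printf("kept code %d for context %d\n", e.code_id, e.context_id);
  return 0;  // the unmarked entry for context 2 was dropped
}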

R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/14794007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14695 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: mstarzinger@chromium.org
Date:   2013-05-15 16:09:25 +0000
Parent: 55f6281281
Commit: 365b2eb91e
11 changed files with 264 additions and 66 deletions

View File

@@ -307,8 +307,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into r4.
-  __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
-  __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+  __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
+  __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
   __ cmp(r2, r5);
   __ b(eq, &install_optimized);
@@ -317,11 +317,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
   __ bind(&loop);
   // Do not double check first entry.
-  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
   __ b(eq, &install_unoptimized);
-  __ sub(r4, r4, Operand(
-      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(r5, MemOperand(r5));
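For readers who do not speak ARM assembly, the lookup these stubs perform is, in spirit, the following. This is a standalone C++ sketch with simplified types, not the V8 implementation; LookupOptimizedCode is an invented name, and the real map additionally carries a link slot at index 0 (see the objects.h constants below), which the new kFirstContextSlot/kFirstCodeSlot offsets skip over.

// Standalone sketch (not V8 code): check the first entry speculatively,
// then walk the remaining entries backwards without re-checking the first.
#include <cstdio>
#include <vector>

struct CodeMapEntry {
  const void* context;
  const void* code;
  const void* literals;
};

const void* LookupOptimizedCode(const std::vector<CodeMapEntry>& map,
                                const void* native_context) {
  if (map.empty()) return nullptr;  // the real map is never empty
  if (map[0].context == native_context) return map[0].code;  // first entry
  for (size_t i = map.size(); i-- > 1; ) {  // backwards, stop before entry 0
    if (map[i].context == native_context) return map[i].code;
  }
  return nullptr;  // miss: the caller installs unoptimized code instead
}

int main() {
  int ctx_a = 0, ctx_b = 0, code_b = 0;
  std::vector<CodeMapEntry> map = {{&ctx_a, nullptr, nullptr},
                                   {&ctx_b, &code_b, nullptr}};
  std::printf("hit: %d\n", LookupOptimizedCode(map, &ctx_b) == &code_b);
  return 0;
}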

View File

@@ -464,6 +464,7 @@ DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again (during full gc)")
 DEFINE_bool(flush_code_incrementally, true,
             "flush code that we expect not to use again (incrementally)")
+DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
 DEFINE_bool(age_code, true,
             "track un-executed functions to age code and flush only "
             "old code")

View File

@@ -292,8 +292,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into edx.
-  __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  __ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
+  __ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
   __ j(equal, &install_optimized);
   // Iterate through the rest of map backwards. edx holds an index as a Smi.
@@ -302,10 +302,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ bind(&loop);
   // Do not double check first entry.
-  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
   __ j(equal, &restore);
-  __ sub(edx, Immediate(Smi::FromInt(
-      SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
   __ j(not_equal, &loop, Label::kNear);
   // Hit: fetch the optimized code.

View File

@@ -1054,6 +1054,70 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
 }


+void CodeFlusher::ProcessOptimizedCodeMaps() {
+  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
+  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
+  static const int kContextOffset = 0;
+  static const int kCodeOffset = 1;
+  static const int kLiteralsOffset = 2;
+  STATIC_ASSERT(kEntryLength == 3);
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    ClearNextCodeMap(holder);
+
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    int new_length = kEntriesStart;
+    int old_length = code_map->length();
+    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
+      Code* code = Code::cast(code_map->get(i + kCodeOffset));
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
+        continue;
+      }
+
+      // Update and record the context slot in the optimized code map.
+      Object** context_slot = HeapObject::RawField(code_map,
+          FixedArray::OffsetOfElementAt(new_length));
+      code_map->set(new_length++, code_map->get(i + kContextOffset));
+      ASSERT(Marking::IsBlack(
+          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
+      isolate_->heap()->mark_compact_collector()->
+          RecordSlot(context_slot, context_slot, *context_slot);
+
+      // Update and record the code slot in the optimized code map.
+      Object** code_slot = HeapObject::RawField(code_map,
+          FixedArray::OffsetOfElementAt(new_length));
+      code_map->set(new_length++, code_map->get(i + kCodeOffset));
+      ASSERT(Marking::IsBlack(
+          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
+      isolate_->heap()->mark_compact_collector()->
+          RecordSlot(code_slot, code_slot, *code_slot);
+
+      // Update and record the literals slot in the optimized code map.
+      Object** literals_slot = HeapObject::RawField(code_map,
+          FixedArray::OffsetOfElementAt(new_length));
+      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
+      ASSERT(Marking::IsBlack(
+          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
+      isolate_->heap()->mark_compact_collector()->
+          RecordSlot(literals_slot, literals_slot, *literals_slot);
+    }
+
+    // Trim the optimized code map if entries have been removed.
+    if (new_length < old_length) {
+      holder->TrimOptimizedCodeMap(old_length - new_length);
+    }
+
+    holder = next_holder;
+  }
+
+  optimized_code_map_holder_head_ = NULL;
+}
+
+
 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
   // Make sure previous flushing decisions are revisited.
   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1112,6 +1176,36 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
 }


+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
+         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  if (holder == code_map_holder) {
+    next_holder = GetNextCodeMap(code_map_holder);
+    optimized_code_map_holder_head_ = next_holder;
+    ClearNextCodeMap(code_map_holder);
+  } else {
+    while (holder != NULL) {
+      next_holder = GetNextCodeMap(holder);
+
+      if (next_holder == code_map_holder) {
+        next_holder = GetNextCodeMap(code_map_holder);
+        SetNextCodeMap(holder, next_holder);
+        ClearNextCodeMap(code_map_holder);
+        break;
+      }
+
+      holder = next_holder;
+    }
+  }
+}
+
+
 void CodeFlusher::EvictJSFunctionCandidates() {
   JSFunction* candidate = jsfunction_candidates_head_;
   JSFunction* next_candidate;
@@ -1136,6 +1230,18 @@ void CodeFlusher::EvictSharedFunctionInfoCandidates() {
 }


+void CodeFlusher::EvictOptimizedCodeMaps() {
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    EvictOptimizedCodeMap(holder);
+    holder = next_holder;
+  }
+  ASSERT(optimized_code_map_holder_head_ == NULL);
+}
+
+
 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
   Heap* heap = isolate_->heap();
@@ -3955,6 +4061,10 @@ void MarkCompactCollector::EnableCodeFlushing(bool enable) {
     delete code_flusher_;
     code_flusher_ = NULL;
   }
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
+  }
 }

View File

@@ -406,9 +406,10 @@ class SlotsBuffer {
 // CodeFlusher collects candidates for code flushing during marking and
 // processes those candidates after marking has completed in order to
 // reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
+// be unreachable. Code objects can be referenced in three ways:
 //  - SharedFunctionInfo references unoptimized code.
 //  - JSFunction references either unoptimized or optimized code.
+//  - OptimizedCodeMap references optimized code.
 // We are not allowed to flush unoptimized code for functions that got
 // optimized or inlined into optimized code, because we might bailout
 // into the unoptimized code again during deoptimization.
@@ -417,7 +418,8 @@ class CodeFlusher {
   explicit CodeFlusher(Isolate* isolate)
       : isolate_(isolate),
         jsfunction_candidates_head_(NULL),
-        shared_function_info_candidates_head_(NULL) {}
+        shared_function_info_candidates_head_(NULL),
+        optimized_code_map_holder_head_(NULL) {}

   void AddCandidate(SharedFunctionInfo* shared_info) {
     if (GetNextCandidate(shared_info) == NULL) {
@@ -434,15 +436,25 @@ class CodeFlusher {
     }
   }

+  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+      optimized_code_map_holder_head_ = code_map_holder;
+    }
+  }
+
+  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
   void EvictCandidate(SharedFunctionInfo* shared_info);
   void EvictCandidate(JSFunction* function);

   void ProcessCandidates() {
+    ProcessOptimizedCodeMaps();
     ProcessSharedFunctionInfoCandidates();
     ProcessJSFunctionCandidates();
   }

   void EvictAllCandidates() {
+    EvictOptimizedCodeMaps();
     EvictJSFunctionCandidates();
     EvictSharedFunctionInfoCandidates();
   }
@@ -450,8 +462,10 @@ class CodeFlusher {
   void IteratePointersToFromSpace(ObjectVisitor* v);

  private:
+  void ProcessOptimizedCodeMaps();
   void ProcessJSFunctionCandidates();
   void ProcessSharedFunctionInfoCandidates();
+  void EvictOptimizedCodeMaps();
   void EvictJSFunctionCandidates();
   void EvictSharedFunctionInfoCandidates();
@@ -489,9 +503,27 @@ class CodeFlusher {
     candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
   }

+  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+    return reinterpret_cast<SharedFunctionInfo*>(next_map);
+  }
+
+  static void SetNextCodeMap(SharedFunctionInfo* holder,
+                             SharedFunctionInfo* next_holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+  }
+
+  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+  }
+
   Isolate* isolate_;
   JSFunction* jsfunction_candidates_head_;
   SharedFunctionInfo* shared_function_info_candidates_head_;
+  SharedFunctionInfo* optimized_code_map_holder_head_;

   DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
 };
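The list of code-map holders costs no extra allocation: the next-map link lives in slot kNextMapIndex of each holder's own code map, so the holders form an intrusive singly linked list. Below is a standalone C++ sketch of that pattern, with a plain pointer standing in for the FixedArray slot; it is not V8 code, and the names are illustrative only.

// Standalone sketch (not V8 code) of an intrusive list threaded through a
// spare slot of each cache, mirroring Add/Evict/ClearNextCodeMap in spirit.
#include <cstdio>

struct Holder {
  const char* name;
  Holder* next_map_link;  // plays the role of the kNextMapIndex slot
};

struct Flusher {
  Holder* head = nullptr;

  void AddOptimizedCodeMap(Holder* h) {
    if (h->next_map_link == nullptr && h != head) {  // not enqueued yet
      h->next_map_link = head;
      head = h;
    }
  }

  void EvictOptimizedCodeMap(Holder* h) {  // unlink one holder from the list
    if (head == h) {
      head = h->next_map_link;
      h->next_map_link = nullptr;
      return;
    }
    for (Holder* cur = head; cur != nullptr; cur = cur->next_map_link) {
      if (cur->next_map_link == h) {
        cur->next_map_link = h->next_map_link;
        h->next_map_link = nullptr;
        return;
      }
    }
  }
};

int main() {
  Holder a{"a", nullptr}, b{"b", nullptr}, c{"c", nullptr};
  Flusher flusher;
  flusher.AddOptimizedCodeMap(&a);
  flusher.AddOptimizedCodeMap(&b);
  flusher.AddOptimizedCodeMap(&c);
  flusher.EvictOptimizedCodeMap(&b);
  for (Holder* h = flusher.head; h != nullptr; h = h->next_map_link)
    std::printf("%s ", h->name);  // prints "c a "
  std::printf("\n");
  return 0;
}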

View File

@@ -309,8 +309,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into t0.
-  __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
-  __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+  __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot));
+  __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot));
   __ Branch(&install_optimized, eq, a2, Operand(t1));
   // Iterate through the rest of map backwards. t0 holds an index as a Smi.
@@ -318,11 +318,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
   __ bind(&loop);
   // Do not double check first entry.
-  __ Branch(&install_unoptimized, eq, t0,
-            Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
-  __ Subu(t0, t0, Operand(
-      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ Branch(&install_unoptimized, eq, t0,
+            Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
+  __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(t1, t1, Operand(at));

View File

@@ -311,15 +311,17 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
   if (shared->ic_age() != heap->global_ic_age()) {
     shared->ResetForNewContext(heap->global_ic_age());
   }
-  if (FLAG_cache_optimized_code) {
-    // Flush optimized code map on major GC.
-    // TODO(mstarzinger): We may experiment with rebuilding it or with
-    // retaining entries which should survive as we iterate through
-    // optimized functions anyway.
-    shared->ClearOptimizedCodeMap("during full gc");
-  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Add the shared function info holding an optimized code map to
+      // the code flusher for processing of code maps after marking.
+      collector->code_flusher()->AddOptimizedCodeMap(shared);
+      // Treat all references within the code map weakly by marking the
+      // code map itself but not pushing it onto the marking deque.
+      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+      StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+    }
     if (IsFlushable(heap, shared)) {
       // This function's code looks flushable. But we have to postpone
       // the decision until we see all functions that point to the same
@@ -332,6 +334,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
       VisitSharedFunctionInfoWeakCode(heap, object);
       return;
     }
+  } else {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Flush optimized code map on major GCs without code flushing,
+      // needed because cached code doesn't contain breakpoints.
+      shared->ClearOptimizedCodeMap();
+    }
   }
   VisitSharedFunctionInfoStrongCode(heap, object);
 }
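The key trick in the hunk above is MarkObjectWithoutPush: the code map itself is marked live but never scanned, so objects it points to are retained only if some other marked object still references them. A standalone C++ sketch of the difference between normal marking and mark-without-push on a toy object graph (not V8 code):

// Standalone sketch (not V8 code): marking with and without scanning.
#include <cstdio>
#include <set>
#include <vector>

struct Obj {
  std::vector<Obj*> refs;
};

// Normal marking: mark the object and push it so its referents get scanned.
void Mark(Obj* root, std::set<Obj*>* marked) {
  std::vector<Obj*> deque = {root};
  while (!deque.empty()) {
    Obj* obj = deque.back();
    deque.pop_back();
    if (!marked->insert(obj).second) continue;  // already marked
    for (Obj* ref : obj->refs) deque.push_back(ref);  // scan = push referents
  }
}

// Weak treatment: set the mark bit but do not push, so refs are not scanned.
void MarkWithoutPush(Obj* obj, std::set<Obj*>* marked) { marked->insert(obj); }

int main() {
  Obj code, cache, function;
  cache.refs = {&code};     // cache -> code, treated weakly below
  function.refs = {&code};  // function -> code, a strong path

  std::set<Obj*> marked;
  MarkWithoutPush(&cache, &marked);  // keep the cache, ignore its contents
  Mark(&function, &marked);          // code survives only via the function

  std::printf("code marked: %d\n", marked.count(&code) ? 1 : 0);
  return 0;
}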

View File

@@ -2186,7 +2186,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
   Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);

   if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
     ZapEndOfFixedArray(new_end, to_trim);
   }

   int size_delta = to_trim * kPointerSize;
@@ -8976,33 +8976,46 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<Context> native_context,
     Handle<Code> code,
     Handle<FixedArray> literals) {
+  CALL_HEAP_FUNCTION_VOID(
+      shared->GetIsolate(),
+      shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+}
+
+
+MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
+                                                       Code* code,
+                                                       FixedArray* literals) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   ASSERT(native_context->IsNativeContext());
   STATIC_ASSERT(kEntryLength == 3);
-  Object* value = shared->optimized_code_map();
-  Handle<FixedArray> new_code_map;
+  Heap* heap = GetHeap();
+  FixedArray* new_code_map;
+  Object* value = optimized_code_map();
   if (value->IsSmi()) {
     // No optimized code map.
     ASSERT_EQ(0, Smi::cast(value)->value());
     // Crate 3 entries per context {context, code, literals}.
-    new_code_map = FACTORY->NewFixedArray(kEntryLength);
-    new_code_map->set(0, *native_context);
-    new_code_map->set(1, *code);
-    new_code_map->set(2, *literals);
+    MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
+    if (!maybe->To(&new_code_map)) return maybe;
+    new_code_map->set(kEntriesStart + 0, native_context);
+    new_code_map->set(kEntriesStart + 1, code);
+    new_code_map->set(kEntriesStart + 2, literals);
   } else {
     // Copy old map and append one new entry.
-    Handle<FixedArray> old_code_map(FixedArray::cast(value));
-    ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
+    FixedArray* old_code_map = FixedArray::cast(value);
+    ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
     int old_length = old_code_map->length();
     int new_length = old_length + kEntryLength;
-    new_code_map = FACTORY->NewFixedArray(new_length);
-    old_code_map->CopyTo(0, *new_code_map, 0, old_length);
-    new_code_map->set(old_length, *native_context);
-    new_code_map->set(old_length + 1, *code);
-    new_code_map->set(old_length + 2, *literals);
+    MaybeObject* maybe = old_code_map->CopySize(new_length);
+    if (!maybe->To(&new_code_map)) return maybe;
+    new_code_map->set(old_length + 0, native_context);
+    new_code_map->set(old_length + 1, code);
+    new_code_map->set(old_length + 2, literals);
+    // Zap the old map for the sake of the heap verifier.
+    if (Heap::ShouldZapGarbage()) ZapOptimizedCodeMap();
   }
 #ifdef DEBUG
-  for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+  for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
     ASSERT(new_code_map->get(i)->IsNativeContext());
     ASSERT(new_code_map->get(i + 1)->IsCode());
     ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
@@ -9010,14 +9023,14 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
     ASSERT(new_code_map->get(i + 2)->IsFixedArray());
   }
 #endif
-  shared->set_optimized_code_map(*new_code_map);
+  set_optimized_code_map(new_code_map);
+  return new_code_map;
 }


 void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
                                                      int index) {
-  ASSERT(index > 0);
+  ASSERT(index > kEntriesStart);
+  ASSERT(optimized_code_map()->IsFixedArray());
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
   if (!bound()) {
     FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
@@ -9031,15 +9044,18 @@ void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
 }


-void SharedFunctionInfo::ClearOptimizedCodeMap(const char* reason) {
-  if (!optimized_code_map()->IsSmi()) {
-    if (FLAG_trace_opt) {
-      PrintF("[clearing entire optimizing code map (%s) for ", reason);
-      ShortPrint();
-      PrintF("]\n");
-    }
-    set_optimized_code_map(Smi::FromInt(0));
-  }
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+
+  // If the next map link slot is already used then the function was
+  // enqueued with code flushing and we remove it now.
+  if (!code_map->get(kNextMapIndex)->IsUndefined()) {
+    CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+    flusher->EvictOptimizedCodeMap(this);
+  }
+
+  ASSERT(code_map->get(kNextMapIndex)->IsUndefined());
+  set_optimized_code_map(Smi::FromInt(0));
 }
@@ -9050,11 +9066,11 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
   int i;
   bool removed_entry = false;
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
-  for (i = 0; i < code_map->length(); i += kEntryLength) {
+  for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
     ASSERT(code_map->get(i)->IsNativeContext());
     if (Code::cast(code_map->get(i + 1)) == optimized_code) {
       if (FLAG_trace_opt) {
-        PrintF("[clearing optimizing code map (%s) for ", reason);
+        PrintF("[evicting entry from optimizing code map (%s) for ", reason);
         ShortPrint();
         PrintF("]\n");
       }
@@ -9069,15 +9085,35 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
     i += kEntryLength;
   }
   if (removed_entry) {
-    if (code_map->length() > kEntryLength) {
-      RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
-    } else {
-      ClearOptimizedCodeMap(reason);
+    // Always trim even when array is cleared because of heap verifier.
+    RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
+    if (code_map->length() == kEntriesStart) {
+      ClearOptimizedCodeMap();
     }
   }
 }


+void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+  ASSERT(shrink_by % kEntryLength == 0);
+  ASSERT(shrink_by <= code_map->length() - kEntriesStart);
+  // Always trim even when array is cleared because of heap verifier.
+  RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+  if (code_map->length() == kEntriesStart) {
+    ClearOptimizedCodeMap();
+  }
+}
+
+
+void SharedFunctionInfo::ZapOptimizedCodeMap() {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+  MemsetPointer(code_map->data_start(),
+                GetHeap()->the_hole_value(),
+                code_map->length());
+}
+
+
 bool JSFunction::CompileLazy(Handle<JSFunction> function,
                              ClearExceptionFlag flag) {
   bool result = true;
@@ -9717,7 +9753,7 @@ int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
   if (!value->IsSmi()) {
     FixedArray* optimized_code_map = FixedArray::cast(value);
     int length = optimized_code_map->length();
-    for (int i = 0; i < length; i += 3) {
+    for (int i = kEntriesStart; i < length; i += kEntryLength) {
       if (optimized_code_map->get(i) == native_context) {
         return i + 1;
       }

View File

@@ -5807,7 +5807,7 @@ class SharedFunctionInfo: public HeapObject {
   inline void ReplaceCode(Code* code);

   // [optimized_code_map]: Map from native context to optimized code
-  // and a shared literals array or Smi 0 if none.
+  // and a shared literals array or Smi(0) if none.
   DECL_ACCESSORS(optimized_code_map, Object)

   // Returns index i of the entry with the specified context. At position
@@ -5820,17 +5820,34 @@ class SharedFunctionInfo: public HeapObject {
   void InstallFromOptimizedCodeMap(JSFunction* function, int index);

   // Clear optimized code map.
-  void ClearOptimizedCodeMap(const char* reason);
+  void ClearOptimizedCodeMap();

   // Removed a specific optimized code object from the optimized code map.
   void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);

+  // Trims the optimized code map after entries have been removed.
+  void TrimOptimizedCodeMap(int shrink_by);
+
+  // Zaps the contents of backing optimized code map.
+  void ZapOptimizedCodeMap();
+
   // Add a new entry to the optimized code map.
+  MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
+                                                     Code* code,
+                                                     FixedArray* literals);
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                     Handle<Context> native_context,
                                     Handle<Code> code,
                                     Handle<FixedArray> literals);

+  // Layout description of the optimized code map.
+  static const int kNextMapIndex = 0;
+  static const int kEntriesStart = 1;
   static const int kEntryLength = 3;
+  static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
+  static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+  static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
+  static const int kInitialLength = kEntriesStart + kEntryLength;
+
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
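For reference, the constants above describe a FixedArray laid out as [next-map link | {context, code, literals}, ...]: kFirstContextSlot and kFirstCodeSlot are the byte offsets of elements 1 and 2 from the start of the array, i.e. the first entry's context and code, which is exactly what the rewritten stubs load, and kSecondEntryIndex is where their backwards scan stops. Below is a standalone sketch of the layout and of a SearchOptimizedCodeMap-style scan using plain C++ containers; it is not V8 code and the helper name Search is made up.

// Standalone sketch (not V8 code) of the optimized code map layout:
//   [ next-map link | context0, code0, literals0 | context1, code1, ... ]
#include <cstdio>
#include <vector>

const int kNextMapIndex = 0;
const int kEntriesStart = 1;
const int kEntryLength = 3;
const int kInitialLength = kEntriesStart + kEntryLength;  // link + one entry

// Returns the index of the code slot for the given context, or -1,
// mirroring SharedFunctionInfo::SearchOptimizedCodeMap() in spirit.
int Search(const std::vector<const void*>& map, const void* native_context) {
  for (int i = kEntriesStart; i < static_cast<int>(map.size());
       i += kEntryLength) {
    if (map[i] == native_context) return i + 1;  // code follows the context
  }
  return -1;
}

int main() {
  int ctx = 0, code = 0, lit = 0;  // dummies standing in for heap objects
  std::vector<const void*> map(kInitialLength, nullptr);
  map[kNextMapIndex] = nullptr;      // the code flusher's next-map link slot
  map[kEntriesStart + 0] = &ctx;     // first entry: context
  map[kEntriesStart + 1] = &code;    // first entry: optimized code
  map[kEntriesStart + 2] = &lit;     // first entry: literals
  std::printf("code index: %d\n", Search(map, &ctx));  // prints 2
  return 0;
}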

View File

@@ -287,8 +287,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into edx.
-  __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
-  __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+  __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
+  __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
   __ j(equal, &install_optimized);
   // Iterate through the rest of map backwards. rdx holds an index.
@@ -298,9 +298,9 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ SmiToInteger32(rdx, rdx);
   __ bind(&loop);
   // Do not double check first entry.
-  __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+  __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
   __ j(equal, &restore);
-  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));  // Skip an entry.
+  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
   __ cmpq(rcx, FieldOperand(rbx,
                             rdx,
                             times_pointer_size,

View File

@@ -2814,7 +2814,6 @@ TEST(Regress169209) {
   CcTest::InitializeVM();
   Isolate* isolate = Isolate::Current();
-  // Force experimental natives to compile to normalize heap layout.
   Heap* heap = isolate->heap();
   HandleScope scope(isolate);