Revert of [heap] Separate out optimized code map processing. (patchset #3 id:40001 of https://codereview.chromium.org/1426953006/ )
Reason for revert:
Breaks build: https://uberchromegw.corp.google.com/i/client.v8/builders/V8%20Linux%20-%20nosnap%20-%20debug/builds/3565

Original issue's description:
> [heap] Separate out optimized code map processing.
>
> This separates the post-processing step for optimized code maps out of
> the CodeFlusher. It uses the complete SharedFunctionInfo::Iterator to
> visit all candidates instead of gathering candidates during marking.
>
> Gathering candidates during marking no longer makes sense, now that the
> majority of SharedFunctionInfo objects will hold such an optimized code
> map. Also it reduces complexity of the implementation. Also conflating
> this mechanism with "code flushing" was confusing.
>
> R=ulan@chromium.org
>
> Committed: https://crrev.com/8ad6168d197dd167235c9d342ec7ce37b0daa88b
> Cr-Commit-Position: refs/heads/master@{#31830}

TBR=ulan@chromium.org,yangguo@chromium.org,mvstanton@chromium.org,mstarzinger@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/1434503003

Cr-Commit-Position: refs/heads/master@{#31832}
This commit is contained in:
parent: 6413ef4e63
commit: b6644e8491
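Context for the hunks below: the revert restores the optimized code map layout in which slot 0 (kNextMapIndex) threads enqueued SharedFunctionInfo holders into a linked list for the CodeFlusher, slot 1 (kSharedCodeIndex) holds the context-independent code entry, and context-dependent entries of four slots [context, code, literals, ast-id] start at kEntriesStart. The C++ sketch below is illustrative only and is not V8 source: CompactCodeMap, Obj, and the namespace are made-up stand-ins, while the k* constants mirror the values in the objects.h hunk. It models how the restored CodeFlusher::ProcessOptimizedCodeMaps() compacts surviving entries to the front of the map and trims the dead tail.

```cpp
// Illustrative sketch only -- not V8 source. Models the optimized code map
// layout restored by this revert and the in-place compaction performed by
// CodeFlusher::ProcessOptimizedCodeMaps().
#include <cstdio>
#include <vector>

namespace sketch {

// Slot layout of the optimized code map, as in the objects.h hunk below.
constexpr int kNextMapIndex = 0;     // link to the next enqueued holder
constexpr int kSharedCodeIndex = 1;  // context-independent code entry
constexpr int kEntriesStart = 2;     // first context-dependent entry
constexpr int kContextOffset = 0;
constexpr int kCachedCodeOffset = 1;
constexpr int kLiteralsOffset = 2;
constexpr int kOsrAstIdOffset = 3;
constexpr int kEntryLength = 4;      // [context, code, literals, ast-id]

struct Obj { bool marked; };  // stand-in for a heap object with a mark bit

// Compacts live entries to the front and returns the new length, mimicking
// the entry loop in the ProcessOptimizedCodeMaps() hunk below.
int CompactCodeMap(std::vector<Obj*>& code_map) {
  int new_length = kEntriesStart;
  int old_length = static_cast<int>(code_map.size());
  for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
    Obj* context = code_map[i + kContextOffset];
    Obj* code = code_map[i + kCachedCodeOffset];
    Obj* literals = code_map[i + kLiteralsOffset];
    // An entry survives only if context, code, and literals are all marked.
    if (!context->marked || !code->marked || !literals->marked) continue;
    for (int k = 0; k < kEntryLength; ++k) {
      code_map[new_length + k] = code_map[i + k];
    }
    new_length += kEntryLength;
  }
  code_map.resize(new_length);  // analogous to TrimOptimizedCodeMap()
  return new_length;
}

}  // namespace sketch

int main() {
  using sketch::Obj;
  Obj live{true}, dead{false};
  // A map with two entries; the second references a dead (unmarked) context.
  std::vector<Obj*> map = {nullptr, nullptr,
                           &live, &live, &live, &live,
                           &dead, &live, &live, &live};
  int len = sketch::CompactCodeMap(map);
  std::printf("entries left: %d\n",
              (len - sketch::kEntriesStart) / sketch::kEntryLength);  // 1
  return 0;
}
```

In the real code the compaction happens in place on the FixedArray and every moved reference is re-recorded via RecordSlot so the collector's slots buffer stays consistent; the sketch omits that bookkeeping.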
@@ -492,7 +492,6 @@ void GCTracer::PrintNVP() const {
                  "mark_weakrefs=%.1f "
                  "mark_globalhandles=%.1f "
                  "mark_codeflush=%.1f "
-                 "mark_optimizedcodemaps=%.1f "
                  "store_buffer_clear=%.1f "
                  "slots_buffer_clear=%.1f "
                  "sweep=%.2f "
@@ -559,7 +558,6 @@ void GCTracer::PrintNVP() const {
                  current_.scopes[Scope::MC_MARK_WEAK_REFERENCES],
                  current_.scopes[Scope::MC_MARK_GLOBAL_HANDLES],
                  current_.scopes[Scope::MC_MARK_CODE_FLUSH],
-                 current_.scopes[Scope::MC_MARK_OPTIMIZED_CODE_MAPS],
                  current_.scopes[Scope::MC_STORE_BUFFER_CLEAR],
                  current_.scopes[Scope::MC_SLOTS_BUFFER_CLEAR],
                  current_.scopes[Scope::MC_SWEEP],
@@ -109,7 +109,6 @@ class GCTracer {
     MC_MARK_WEAK_REFERENCES,
     MC_MARK_GLOBAL_HANDLES,
     MC_MARK_CODE_FLUSH,
-    MC_MARK_OPTIMIZED_CODE_MAPS,
     MC_STORE_BUFFER_CLEAR,
     MC_SLOTS_BUFFER_CLEAR,
     MC_SWEEP,
@@ -99,6 +99,14 @@ void CodeFlusher::AddCandidate(JSFunction* function) {
 }
 
 
+void CodeFlusher::AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+  if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+    SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+    optimized_code_map_holder_head_ = code_map_holder;
+  }
+}
+
+
 JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
   return reinterpret_cast<JSFunction**>(
       HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
@@ -140,6 +148,26 @@ void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
   candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
 }
 
 
+SharedFunctionInfo* CodeFlusher::GetNextCodeMap(SharedFunctionInfo* holder) {
+  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+  Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+  return reinterpret_cast<SharedFunctionInfo*>(next_map);
+}
+
+
+void CodeFlusher::SetNextCodeMap(SharedFunctionInfo* holder,
+                                 SharedFunctionInfo* next_holder) {
+  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+  code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+}
+
+
+void CodeFlusher::ClearNextCodeMap(SharedFunctionInfo* holder) {
+  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+  code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+}
+
+
 }  // namespace internal
 }  // namespace v8
@@ -984,6 +984,85 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
 }
 
 
+void CodeFlusher::ProcessOptimizedCodeMaps() {
+  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    ClearNextCodeMap(holder);
+
+    // Process context-dependent entries in the optimized code map.
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    int new_length = SharedFunctionInfo::kEntriesStart;
+    int old_length = code_map->length();
+    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
+         i += SharedFunctionInfo::kEntryLength) {
+      // Each entry contains [ context, code, literals, ast-id ] as fields.
+      STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+      Context* context =
+          Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
+      HeapObject* code = HeapObject::cast(
+          code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      FixedArray* literals = FixedArray::cast(
+          code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
+      Smi* ast_id =
+          Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
+      if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
+      if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
+      if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
+      // Move every slot in the entry and record slots when needed.
+      code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
+      code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
+      code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
+      code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
+      Object** code_slot = code_map->RawFieldOfElementAt(
+          new_length + SharedFunctionInfo::kCachedCodeOffset);
+      isolate_->heap()->mark_compact_collector()->RecordSlot(
+          code_map, code_slot, *code_slot);
+      Object** context_slot = code_map->RawFieldOfElementAt(
+          new_length + SharedFunctionInfo::kContextOffset);
+      isolate_->heap()->mark_compact_collector()->RecordSlot(
+          code_map, context_slot, *context_slot);
+      Object** literals_slot = code_map->RawFieldOfElementAt(
+          new_length + SharedFunctionInfo::kLiteralsOffset);
+      isolate_->heap()->mark_compact_collector()->RecordSlot(
+          code_map, literals_slot, *literals_slot);
+      new_length += SharedFunctionInfo::kEntryLength;
+    }
+
+    // Process context-independent entry in the optimized code map.
+    Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
+    if (shared_object->IsCode()) {
+      Code* shared_code = Code::cast(shared_object);
+      if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
+        code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
+      } else {
+        DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
+        Object** slot =
+            code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
+        isolate_->heap()->mark_compact_collector()->RecordSlot(code_map, slot,
+                                                               *slot);
+      }
+    }
+
+    // Trim the optimized code map if entries have been removed.
+    if (new_length < old_length) {
+      holder->TrimOptimizedCodeMap(old_length - new_length);
+    }
+
+    holder = next_holder;
+  }
+
+  optimized_code_map_holder_head_ = NULL;
+}
+
+
 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
   // Make sure previous flushing decisions are revisited.
   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1054,6 +1133,44 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
 }
 
 
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+  FixedArray* code_map =
+      FixedArray::cast(code_map_holder->optimized_code_map());
+  DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(code_map);
+  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons code-map: ");
+    code_map_holder->ShortPrint();
+    PrintF("]\n");
+  }
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  if (holder == code_map_holder) {
+    next_holder = GetNextCodeMap(code_map_holder);
+    optimized_code_map_holder_head_ = next_holder;
+    ClearNextCodeMap(code_map_holder);
+  } else {
+    while (holder != NULL) {
+      next_holder = GetNextCodeMap(holder);
+
+      if (next_holder == code_map_holder) {
+        next_holder = GetNextCodeMap(code_map_holder);
+        SetNextCodeMap(holder, next_holder);
+        ClearNextCodeMap(code_map_holder);
+        break;
+      }
+
+      holder = next_holder;
+    }
+  }
+}
+
+
 void CodeFlusher::EvictJSFunctionCandidates() {
   JSFunction* candidate = jsfunction_candidates_head_;
   JSFunction* next_candidate;
@@ -1078,6 +1195,18 @@ void CodeFlusher::EvictSharedFunctionInfoCandidates() {
 }
 
 
+void CodeFlusher::EvictOptimizedCodeMaps() {
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    EvictOptimizedCodeMap(holder);
+    holder = next_holder;
+  }
+  DCHECK(optimized_code_map_holder_head_ == NULL);
+}
+
+
 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
   Heap* heap = isolate_->heap();
 
@@ -2105,13 +2234,6 @@ void MarkCompactCollector::AfterMarking() {
     code_flusher_->ProcessCandidates();
   }
 
-  // Process and clear all optimized code maps.
-  if (!FLAG_flush_optimized_code_cache) {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_MARK_OPTIMIZED_CODE_MAPS);
-    ProcessAndClearOptimizedCodeMaps();
-  }
-
   if (FLAG_track_gc_object_stats) {
     if (FLAG_trace_gc_object_stats) {
       heap()->object_stats_->TraceObjectStats();
@@ -2121,72 +2243,6 @@ void MarkCompactCollector::AfterMarking() {
 }
 
 
-void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() {
-  SharedFunctionInfo::Iterator iterator(isolate());
-  while (SharedFunctionInfo* shared = iterator.Next()) {
-    if (shared->optimized_code_map()->IsSmi()) continue;
-
-    // Process context-dependent entries in the optimized code map.
-    FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
-    int new_length = SharedFunctionInfo::kEntriesStart;
-    int old_length = code_map->length();
-    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
-         i += SharedFunctionInfo::kEntryLength) {
-      // Each entry contains [ context, code, literals, ast-id ] as fields.
-      STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
-      Context* context =
-          Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
-      HeapObject* code = HeapObject::cast(
-          code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
-      FixedArray* literals = FixedArray::cast(
-          code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
-      Smi* ast_id =
-          Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
-      if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
-      if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
-      if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
-      // Move every slot in the entry and record slots when needed.
-      code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
-      code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
-      code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
-      code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
-      Object** code_slot = code_map->RawFieldOfElementAt(
-          new_length + SharedFunctionInfo::kCachedCodeOffset);
-      RecordSlot(code_map, code_slot, *code_slot);
-      Object** context_slot = code_map->RawFieldOfElementAt(
-          new_length + SharedFunctionInfo::kContextOffset);
-      RecordSlot(code_map, context_slot, *context_slot);
-      Object** literals_slot = code_map->RawFieldOfElementAt(
-          new_length + SharedFunctionInfo::kLiteralsOffset);
-      RecordSlot(code_map, literals_slot, *literals_slot);
-      new_length += SharedFunctionInfo::kEntryLength;
-    }
-
-    // Process context-independent entry in the optimized code map.
-    Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
-    if (shared_object->IsCode()) {
-      Code* shared_code = Code::cast(shared_object);
-      if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
-        code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
-      } else {
-        DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
-        Object** slot =
-            code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
-        RecordSlot(code_map, slot, *slot);
-      }
-    }
-
-    // Trim the optimized code map if entries have been removed.
-    if (new_length < old_length) {
-      shared->TrimOptimizedCodeMap(old_length - new_length);
-    }
-  }
-}
-
-
 void MarkCompactCollector::ClearNonLiveReferences() {
   GCTracer::Scope gc_scope(heap()->tracer(),
                            GCTracer::Scope::MC_NONLIVEREFERENCES);
@@ -263,9 +263,10 @@ class MarkingDeque {
 // CodeFlusher collects candidates for code flushing during marking and
 // processes those candidates after marking has completed in order to
 // reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
+// be unreachable. Code objects can be referenced in three ways:
 //   - SharedFunctionInfo references unoptimized code.
 //   - JSFunction references either unoptimized or optimized code.
+//   - OptimizedCodeMap references optimized code.
 // We are not allowed to flush unoptimized code for functions that got
 // optimized or inlined into optimized code, because we might bailout
 // into the unoptimized code again during deoptimization.
@@ -273,21 +274,26 @@ class CodeFlusher {
  public:
   explicit CodeFlusher(Isolate* isolate)
       : isolate_(isolate),
-        jsfunction_candidates_head_(nullptr),
-        shared_function_info_candidates_head_(nullptr) {}
+        jsfunction_candidates_head_(NULL),
+        shared_function_info_candidates_head_(NULL),
+        optimized_code_map_holder_head_(NULL) {}
 
   inline void AddCandidate(SharedFunctionInfo* shared_info);
   inline void AddCandidate(JSFunction* function);
+  inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
 
+  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
   void EvictCandidate(SharedFunctionInfo* shared_info);
   void EvictCandidate(JSFunction* function);
 
   void ProcessCandidates() {
+    ProcessOptimizedCodeMaps();
     ProcessSharedFunctionInfoCandidates();
     ProcessJSFunctionCandidates();
   }
 
   void EvictAllCandidates() {
+    EvictOptimizedCodeMaps();
     EvictJSFunctionCandidates();
     EvictSharedFunctionInfoCandidates();
   }
@@ -295,8 +301,10 @@ class CodeFlusher {
   void IteratePointersToFromSpace(ObjectVisitor* v);
 
  private:
+  void ProcessOptimizedCodeMaps();
   void ProcessJSFunctionCandidates();
   void ProcessSharedFunctionInfoCandidates();
+  void EvictOptimizedCodeMaps();
   void EvictJSFunctionCandidates();
   void EvictSharedFunctionInfoCandidates();
 
@@ -313,9 +321,15 @@ class CodeFlusher {
                                       SharedFunctionInfo* next_candidate);
   static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
 
+  static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
+  static inline void SetNextCodeMap(SharedFunctionInfo* holder,
+                                    SharedFunctionInfo* next_holder);
+  static inline void ClearNextCodeMap(SharedFunctionInfo* holder);
+
   Isolate* isolate_;
   JSFunction* jsfunction_candidates_head_;
   SharedFunctionInfo* shared_function_info_candidates_head_;
+  SharedFunctionInfo* optimized_code_map_holder_head_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
 };
@@ -680,14 +694,10 @@ class MarkCompactCollector {
   // collections when incremental marking is aborted.
   void AbortWeakCollections();
 
   void ProcessAndClearWeakCells();
   void AbortWeakCells();
 
-  // After all reachable objects have been marked, those entries within
-  // optimized code maps that became unreachable are removed, potentially
-  // trimming or clearing out the entire optimized code map.
-  void ProcessAndClearOptimizedCodeMaps();
 
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
   // a non-compacting collection.
@@ -443,23 +443,23 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
   if (FLAG_cleanup_code_caches_at_gc) {
     shared->ClearTypeFeedbackInfoAtGCTime();
   }
-  if (FLAG_flush_optimized_code_cache ||
-      heap->isolate()->serializer_enabled()) {
-    if (!shared->optimized_code_map()->IsSmi()) {
-      // Always flush the optimized code map if requested by flag.
-      shared->ClearOptimizedCodeMap();
-    }
-  } else {
-    if (!shared->optimized_code_map()->IsSmi()) {
-      // Treat some references within the code map weakly by marking the
-      // code map itself but not pushing it onto the marking deque. The
-      // map will be processed after marking.
-      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
-      MarkOptimizedCodeMap(heap, code_map);
-    }
-  }
+  if ((FLAG_flush_optimized_code_cache ||
+       heap->isolate()->serializer_enabled()) &&
+      !shared->optimized_code_map()->IsSmi()) {
+    // Always flush the optimized code map if requested by flag.
+    shared->ClearOptimizedCodeMap();
+  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
+    if (!shared->optimized_code_map()->IsSmi()) {
+      // Add the shared function info holding an optimized code map to
+      // the code flusher for processing of code maps after marking.
+      collector->code_flusher()->AddOptimizedCodeMap(shared);
+      // Treat some references within the code map weakly by marking the
+      // code map itself but not pushing it onto the marking deque.
+      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+      MarkOptimizedCodeMap(heap, code_map);
+    }
     if (IsFlushable(heap, shared)) {
       // This function's code looks flushable. But we have to postpone
       // the decision until we see all functions that point to the same
@@ -473,7 +473,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
       return;
     }
   } else {
-    // TODO(mstarzinger): Drop this case, it shouldn't be done here!
     if (!shared->optimized_code_map()->IsSmi()) {
       // Flush optimized code map on major GCs without code flushing,
       // needed because cached code doesn't contain breakpoints.
@@ -11107,6 +11107,16 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
 
 
 void SharedFunctionInfo::ClearOptimizedCodeMap() {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+
+  // If the next map link slot is already used then the function was
+  // enqueued with code flushing and we remove it now.
+  if (!code_map->get(kNextMapIndex)->IsUndefined()) {
+    CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+    flusher->EvictOptimizedCodeMap(this);
+  }
+
+  DCHECK(code_map->get(kNextMapIndex)->IsUndefined());
   set_optimized_code_map(Smi::FromInt(0));
 }
@@ -6555,8 +6555,9 @@ class SharedFunctionInfo: public HeapObject {
                                      Handle<Object> script_object);
 
   // Layout description of the optimized code map.
-  static const int kSharedCodeIndex = 0;
-  static const int kEntriesStart = 1;
+  static const int kNextMapIndex = 0;
+  static const int kSharedCodeIndex = 1;
+  static const int kEntriesStart = 2;
   static const int kContextOffset = 0;
   static const int kCachedCodeOffset = 1;
   static const int kLiteralsOffset = 2;