[heap] Handle JSFunction, SharedFunctionInfo in concurrent marker.

This patch also adds handling of NativeContext and BytecodeArray.

BUG=chromium:694255

Change-Id: I6d4b2db03ece7346200853bd0b80daf65672787f
Reviewed-on: https://chromium-review.googlesource.com/543237
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46139}
This commit is contained in:
Ulan Degenbaev 2017-06-22 15:47:44 +02:00 committed by Commit Bot
parent e3e0b6f46b
commit 4f85c19252
7 changed files with 61 additions and 24 deletions

View File

@@ -712,13 +712,11 @@ class Context: public FixedArray {
static const int kNotFound = -1;
// GC support.
typedef FixedBodyDescriptor<
kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
typedef FixedBodyDescriptor<kHeaderSize, kSize, kSize> BodyDescriptor;
typedef FixedBodyDescriptor<
kHeaderSize,
kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
kSize> MarkCompactBodyDescriptor;
kHeaderSize, kHeaderSize + FIRST_WEAK_SLOT * kPointerSize, kSize>
BodyDescriptorWeak;
private:
#ifdef DEBUG

View File

@@ -73,6 +73,13 @@ class ConcurrentMarkingVisitor final
}
}
void VisitCodeEntry(JSFunction* host, Address entry_address) override {
Address code_entry = base::AsAtomicWord::Relaxed_Load(
reinterpret_cast<Address*>(entry_address));
Object* code = Code::GetObjectFromCodeEntry(code_entry);
VisitPointer(host, &code);
}
// ===========================================================================
// JS object =================================================================
// ===========================================================================
@@ -120,15 +127,23 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitBytecodeArray(Map* map, BytecodeArray* object) override {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
// Aging of bytecode arrays is done on the main thread.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
}
return 0;
}
int VisitJSFunction(Map* map, JSFunction* object) override {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
return 0;
if (!ShouldVisit(object)) return 0;
int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
return size;
}
int VisitMap(Map* map, Map* object) override {
@@ -138,14 +153,27 @@ class ConcurrentMarkingVisitor final
}
int VisitNativeContext(Map* map, Context* object) override {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
int size = Context::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
Context::BodyDescriptorWeak::IterateBody(object, size, this);
// TODO(ulan): implement proper weakness for normalized map cache
// and remove this bailout.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
}
return 0;
}
int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) override {
// TODO(ulan): implement iteration of strong fields.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
int size = SharedFunctionInfo::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
SharedFunctionInfo::BodyDescriptorWeak::IterateBody(object, size, this);
// Resetting of IC age counter is done on the main thread.
deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
}
return 0;
}

View File

@@ -2248,9 +2248,9 @@ class YoungGenerationMarkingVisitor final
int VisitNativeContext(Map* map, Context* object) final {
if (!ShouldVisit(object)) return 0;
int size = Context::ScavengeBodyDescriptor::SizeOf(map, object);
int size = Context::BodyDescriptor::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
Context::ScavengeBodyDescriptor::IterateBody(object, size, this);
Context::BodyDescriptor::IterateBody(object, size, this);
return size;
}

View File

@@ -65,8 +65,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(
kVisitNativeContext,
&FixedBodyVisitor<StaticVisitor, Context::ScavengeBodyDescriptor,
int>::Visit);
&FixedBodyVisitor<StaticVisitor, Context::BodyDescriptor, int>::Visit);
table_.Register(kVisitByteArray, &VisitByteArray);
@@ -321,8 +320,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
Map* map, HeapObject* object) {
FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
void>::Visit(map, object);
FixedBodyVisitor<StaticVisitor, Context::BodyDescriptorWeak, void>::Visit(
map, object);
}

View File

@@ -4205,10 +4205,12 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
return result;
}
Object* Code::GetObjectFromCodeEntry(Address code_entry) {
return HeapObject::FromAddress(code_entry - Code::kHeaderSize);
}
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
return HeapObject::
FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
return GetObjectFromCodeEntry(Memory::Address_at(location_of_address));
}
@@ -4878,7 +4880,8 @@ Code* JSFunction::code() {
void JSFunction::set_code(Code* value) {
DCHECK(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
RELAXED_WRITE_INTPTR_FIELD(this, kCodeEntryOffset,
reinterpret_cast<intptr_t>(entry));
GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
this,
HeapObject::RawField(this, kCodeEntryOffset),
@@ -4889,7 +4892,8 @@ void JSFunction::set_code(Code* value) {
void JSFunction::set_code_no_write_barrier(Code* value) {
DCHECK(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
RELAXED_WRITE_INTPTR_FIELD(this, kCodeEntryOffset,
reinterpret_cast<intptr_t>(entry));
}
void JSFunction::ClearOptimizedCodeSlot(const char* reason) {

View File

@@ -3830,6 +3830,9 @@ class Code: public HeapObject {
// Convert an entry address into an object.
static inline Object* GetObjectFromEntryAddress(Address location_of_address);
// Convert a code entry into an object.
static inline Object* GetObjectFromCodeEntry(Address code_entry);
// Returns the address of the first instruction.
inline byte* instruction_start();

View File

@@ -181,6 +181,11 @@
#define READ_INTPTR_FIELD(p, offset) \
(*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))
#define RELAXED_WRITE_INTPTR_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
static_cast<base::AtomicWord>(value));
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)