[heap] Move the chunk map to CodeLargeObjectSpace.

Only Heap::GcSafeFindCodeForInnerPointer requires the chunk map.
Other large object spaces use the more efficient
MemoryChunk::FromAnyPointerAddress.

Additionally, this patch renames Register/Unregister to AddPage/RemovePage
to be consistent with other spaces and makes them virtual.

Bug: chromium:852420
Change-Id: I8d637bb59e15bd61fe452fda7f4a55049d32030c
Reviewed-on: https://chromium-review.googlesource.com/c/1439417
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59207}
Author: Ulan Degenbaev <ulan@chromium.org>
Date: 2019-01-29 13:21:28 +01:00 (committed by Commit Bot)
Commit: 18ad43c749 (parent: 99936546d8)
Changed: 4 files with 58 additions and 83 deletions
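For orientation, here is a minimal sketch of the two lookup strategies the
message contrasts (simplified stand-in types and constants, not V8's actual
declarations): a large page can span several chunk-aligned memory regions, so
masking an inner pointer down to the chunk alignment does not necessarily land
on the page header. The chunk map therefore records every chunk-aligned
address covered by a large code page, and FindPage consults it.

#include <cstdint>
#include <unordered_map>

using Address = uintptr_t;
struct LargePage;  // opaque stand-in for v8::internal::LargePage
constexpr size_t kChunkAlignment = 256 * 1024;  // stand-in chunk granularity

// Alignment-based lookup: sufficient whenever the pointer lies in the first
// chunk-aligned slice of its page (true for all regular pages).
inline Address BaseChunk(Address addr) { return addr & ~(kChunkAlignment - 1); }

// Mirrors InsertChunkMapEntries below: one map entry per chunk-aligned slice,
// so even a pointer deep inside a multi-chunk object finds its page.
inline void InsertChunkMapEntriesSketch(
    std::unordered_map<Address, LargePage*>& chunk_map, LargePage* page,
    Address page_start, size_t page_size) {
  for (Address current = page_start; current < page_start + page_size;
       current += kChunkAlignment) {
    chunk_map[current] = page;
  }
}

// Mirrors CodeLargeObjectSpace::FindPage below.
inline LargePage* FindPageSketch(
    const std::unordered_map<Address, LargePage*>& chunk_map, Address addr) {
  auto it = chunk_map.find(BaseChunk(addr));
  return it == chunk_map.end() ? nullptr : it->second;
}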


@@ -3417,11 +3417,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap)
     : LargeObjectSpace(heap, LO_SPACE) {}

 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
-    : Space(heap, id),
-      size_(0),
-      page_count_(0),
-      objects_size_(0),
-      chunk_map_(1024) {}
+    : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}

 void LargeObjectSpace::TearDown() {
   while (!memory_chunk_list_.Empty()) {
@@ -3472,7 +3468,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
   if (page == nullptr) return nullptr;
   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

-  Register(page, object_size);
+  AddPage(page, object_size);

   HeapObject object = page->GetObject();
@@ -3490,29 +3486,17 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
   return CommittedMemory();
 }

-// GC support
-Object LargeObjectSpace::FindObject(Address a) {
-  LargePage* page = FindPage(a);
-  if (page != nullptr) {
-    return page->GetObject();
-  }
-  return Smi::kZero;  // Signaling not found.
-}
-
-LargePage* LargeObjectSpace::FindPage(Address a) {
+LargePage* CodeLargeObjectSpace::FindPage(Address a) {
   const Address key = MemoryChunk::FromAddress(a)->address();
   auto it = chunk_map_.find(key);
   if (it != chunk_map_.end()) {
     LargePage* page = it->second;
-    if (page->Contains(a)) {
-      return page;
-    }
+    CHECK(page->Contains(a));
+    return page;
   }
   return nullptr;
 }

 void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
@@ -3529,10 +3513,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   }
 }

-void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
-  // There may be concurrent access on the chunk map. We have to take the lock
-  // here.
-  base::MutexGuard guard(&chunk_map_mutex_);
+void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
   for (Address current = reinterpret_cast<Address>(page);
        current < reinterpret_cast<Address>(page) + page->size();
        current += MemoryChunk::kPageSize) {
@@ -3540,13 +3521,8 @@ void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
   }
 }

-void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
-  RemoveChunkMapEntries(page, page->address());
-}
-
-void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
-                                             Address free_start) {
-  for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
+void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
+  for (Address current = page->address();
        current < reinterpret_cast<Address>(page) + page->size();
        current += MemoryChunk::kPageSize) {
     chunk_map_.erase(current);
@@ -3559,32 +3535,27 @@ void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
   DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
   DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
   size_t object_size = static_cast<size_t>(page->GetObject()->Size());
-  reinterpret_cast<NewLargeObjectSpace*>(page->owner())
-      ->Unregister(page, object_size);
-  Register(page, object_size);
+  static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
+  AddPage(page, object_size);
   page->ClearFlag(MemoryChunk::FROM_PAGE);
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->set_owner(this);
 }

-void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
+void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
   size_ += static_cast<int>(page->size());
   AccountCommitted(page->size());
   objects_size_ += object_size;
   page_count_++;
   memory_chunk_list_.PushBack(page);
-  InsertChunkMapEntries(page);
 }

-void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
+void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
   size_ -= static_cast<int>(page->size());
   AccountUncommitted(page->size());
   objects_size_ -= object_size;
   page_count_--;
   memory_chunk_list_.Remove(page);
-  RemoveChunkMapEntries(page);
 }

 void LargeObjectSpace::FreeUnmarkedObjects() {
@@ -3593,20 +3564,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       heap()->incremental_marking()->non_atomic_marking_state();
   // Right-trimming does not update the objects_size_ counter. We are lazily
   // updating it after every GC.
-  objects_size_ = 0;
+  size_t surviving_object_size = 0;
   while (current) {
     LargePage* next_current = current->next_page();
     HeapObject object = current->GetObject();
     DCHECK(!marking_state->IsGrey(object));
+    size_t size = static_cast<size_t>(object->Size());
     if (marking_state->IsBlack(object)) {
       Address free_start;
-      size_t size = static_cast<size_t>(object->Size());
-      objects_size_ += size;
+      surviving_object_size += size;
       if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
           0) {
         DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
         current->ClearOutOfLiveRangeSlots(free_start);
-        RemoveChunkMapEntries(current, free_start);
         const size_t bytes_to_free =
             current->size() - (free_start - current->address());
         heap()->memory_allocator()->PartialFreeMemory(
@@ -3616,19 +3586,13 @@
         AccountUncommitted(bytes_to_free);
       }
     } else {
-      memory_chunk_list_.Remove(current);
-      // Free the chunk.
-      size_ -= static_cast<int>(current->size());
-      AccountUncommitted(current->size());
-      page_count_--;
-      RemoveChunkMapEntries(current);
+      RemovePage(current, size);
       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
           current);
     }
     current = next_current;
   }
+  objects_size_ = surviving_object_size;
 }

 bool LargeObjectSpace::Contains(HeapObject object) {
@@ -3636,11 +3600,18 @@ bool LargeObjectSpace::Contains(HeapObject object) {
   bool owned = (chunk->owner() == this);
-  SLOW_DCHECK(!owned || FindObject(object->address())->IsHeapObject());
+  SLOW_DCHECK(!owned || ContainsSlow(object->address()));
   return owned;
 }

+bool LargeObjectSpace::ContainsSlow(Address addr) {
+  for (LargePage* page : *this) {
+    if (page->Contains(addr)) return true;
+  }
+  return false;
+}
+
 std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
   return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
 }
@@ -3793,7 +3764,7 @@ void NewLargeObjectSpace::FreeAllObjects() {
   LargePage* current = first_page();
   while (current) {
     LargePage* next_current = current->next_page();
-    Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
+    RemovePage(current, static_cast<size_t>(current->GetObject()->Size()));
     heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
         current);
     current = next_current;
@@ -3804,11 +3775,22 @@ void NewLargeObjectSpace::FreeAllObjects() {
 }

 CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
-    : LargeObjectSpace(heap, CODE_LO_SPACE) {}
+    : LargeObjectSpace(heap, CODE_LO_SPACE),
+      chunk_map_(kInitialChunkMapCapacity) {}

 AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
   return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
 }

+void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
+  LargeObjectSpace::AddPage(page, object_size);
+  InsertChunkMapEntries(page);
+}
+
+void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
+  RemoveChunkMapEntries(page);
+  LargeObjectSpace::RemovePage(page, object_size);
+}
+
 }  // namespace internal
 }  // namespace v8
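A note on the AddPage/RemovePage overrides just above: base-class code paths
such as AllocateLargePage, PromoteNewLargeObject, and FreeUnmarkedObjects call
these methods without knowing which space they operate on, so making them
virtual is what lets CodeLargeObjectSpace keep its chunk map in sync. A
minimal sketch of that dispatch, with simplified stand-in classes (not V8's
real declarations):

#include <cstdio>

struct LargePage;  // opaque stand-in

class LargeObjectSpaceSketch {
 public:
  virtual ~LargeObjectSpaceSketch() = default;
  virtual void AddPage(LargePage*) { std::puts("shared bookkeeping"); }
  virtual void RemovePage(LargePage*) { std::puts("shared bookkeeping"); }

  // Base-class code path, as in FreeUnmarkedObjects: it calls through `this`,
  // so a CodeLargeObjectSpaceSketch still gets its override run.
  void FreeDeadPage(LargePage* page) { RemovePage(page); }
};

class CodeLargeObjectSpaceSketch : public LargeObjectSpaceSketch {
 public:
  void AddPage(LargePage* page) override {
    LargeObjectSpaceSketch::AddPage(page);  // account first...
    std::puts("insert chunk-map entries");  // ...then index the page
  }
  void RemovePage(LargePage* page) override {
    std::puts("remove chunk-map entries");  // unindex before the page dies
    LargeObjectSpaceSketch::RemovePage(page);
  }
};

The ordering mirrors the patch: map entries are inserted after the shared
bookkeeping and removed before it, so the map never points at an
unaccounted page.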


@@ -3025,37 +3025,25 @@ class LargeObjectSpace : public Space {
   int PageCount() { return page_count_; }

-  // Finds an object for a given address, returns a Smi if it is not found.
-  // The function iterates through all objects in this space, may be slow.
-  Object FindObject(Address a);
-
-  // Finds a large object page containing the given address, returns nullptr
-  // if such a page doesn't exist.
-  LargePage* FindPage(Address a);
-
   // Clears the marking state of live objects.
   void ClearMarkingStateOfLiveObjects();

   // Frees unmarked objects.
   void FreeUnmarkedObjects();

-  void InsertChunkMapEntries(LargePage* page);
-  void RemoveChunkMapEntries(LargePage* page);
-  void RemoveChunkMapEntries(LargePage* page, Address free_start);
-
   void PromoteNewLargeObject(LargePage* page);

   // Checks whether a heap object is in this space; O(1).
   bool Contains(HeapObject obj);
   // Checks whether an address is in the object area in this space. Iterates
   // all objects in the space. May be slow.
-  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
+  bool ContainsSlow(Address addr);

   // Checks whether the space is empty.
   bool IsEmpty() { return first_page() == nullptr; }

-  void Register(LargePage* page, size_t object_size);
-  void Unregister(LargePage* page, size_t object_size);
+  virtual void AddPage(LargePage* page, size_t object_size);
+  virtual void RemovePage(LargePage* page, size_t object_size);

   LargePage* first_page() {
     return reinterpret_cast<LargePage*>(Space::first_page());
@@ -3069,8 +3057,6 @@
   std::unique_ptr<ObjectIterator> GetObjectIterator() override;

-  base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
-
 #ifdef VERIFY_HEAP
   virtual void Verify(Isolate* isolate);
 #endif
@@ -3089,13 +3075,6 @@
   size_t objects_size_;  // size of objects

  private:
-  // The chunk_map_mutex_ has to be used when the chunk map is accessed
-  // concurrently.
-  base::Mutex chunk_map_mutex_;
-
-  // Page-aligned addresses to their corresponding LargePage.
-  std::unordered_map<Address, LargePage*> chunk_map_;
-
   friend class LargeObjectIterator;
 };
@@ -3119,6 +3098,22 @@ class CodeLargeObjectSpace : public LargeObjectSpace {
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
   AllocateRaw(int object_size);

+  // Finds a large object page containing the given address, returns nullptr
+  // if such a page doesn't exist.
+  LargePage* FindPage(Address a);
+
+ protected:
+  void AddPage(LargePage* page, size_t object_size) override;
+  void RemovePage(LargePage* page, size_t object_size) override;
+
+ private:
+  static const size_t kInitialChunkMapCapacity = 1024;
+
+  void InsertChunkMapEntries(LargePage* page);
+  void RemoveChunkMapEntries(LargePage* page);
+
+  // Page-aligned addresses to their corresponding LargePage.
+  std::unordered_map<Address, LargePage*> chunk_map_;
 };

 class LargeObjectIterator : public ObjectIterator {


@@ -5355,7 +5355,7 @@ TEST(Regress598319) {
   CHECK_EQ(arr.get()->length(), kNumberOfObjects);
   CHECK(heap->lo_space()->Contains(arr.get()));

-  LargePage* page = heap->lo_space()->FindPage(arr.get()->address());
+  LargePage* page = LargePage::FromHeapObject(arr.get());
   CHECK_NOT_NULL(page);

   // GC to cleanup state
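Why the masking-based lookup is enough here (a sketch under the assumption
that FromHeapObject-style lookups mask addresses down to the chunk alignment,
which is how MemoryChunk::FromAddress works): this test starts from the
object's own address, and a large object is the only object on its page,
placed right after the page header. Masking an object's start address
therefore always yields its page; only inner pointers, which this test never
uses, need the chunk map.

#include <cstdint>

using Address = uintptr_t;
constexpr size_t kChunkAlignment = 256 * 1024;  // stand-in chunk granularity

// Sketch of a FromHeapObject-style lookup: masking is valid for an object's
// start address because that address lies in the first chunk of its page.
inline Address PageFromObjectStart(Address object_start) {
  return object_start & ~(kChunkAlignment - 1);
}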

View File

@@ -315,8 +315,6 @@ TEST(LargeObjectSpace) {
   CHECK(lo->Contains(HeapObject::cast(obj)));

-  CHECK(lo->FindObject(ho->address()) == obj);
-
   CHECK(lo->Contains(ho));

   while (true) {