[heap] Cleanup Heap SetUp/TearDown a bit.

Change-Id: Ieec4dccdf8a5241f439bde9fffc75f4f300930e1
Reviewed-on: https://chromium-review.googlesource.com/1089333
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53567}
Author: Hannes Payer <hpayer@chromium.org>
Date: 2018-06-06 21:10:19 +02:00
Committed-by: Commit Bot
Commit: 5e31f9ffdf (parent: b984b70c3a)

6 changed files with 15 additions and 82 deletions
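In short, the cleanup removes the per-space SetUp()/HasBeenSetUp() stubs, moves per-space teardown into the space destructors, and collapses Heap::TearDown into a single loop over the space_ array. A minimal standalone sketch of the resulting ownership pattern, with illustrative names rather than V8's real classes:

#include <cstdio>

// Stand-ins for V8's space classes (illustrative only).
class Space {
 public:
  virtual ~Space() = default;  // deleting through Space* runs the subclass dtor
};

class OldSpaceSketch : public Space {
 public:
  ~OldSpaceSketch() override { TearDown(); }  // teardown lives in the destructor
  void TearDown() { std::printf("OldSpace torn down\n"); }
};

class LargeObjectSpaceSketch : public Space {
 public:
  ~LargeObjectSpaceSketch() override { TearDown(); }
  void TearDown() { std::printf("LargeObjectSpace torn down\n"); }
};

enum AllocationSpaceSketch { FIRST_SPACE = 0, OLD_SPACE = 0, LO_SPACE = 1, LAST_SPACE = 1 };

int main() {
  Space* space_[LAST_SPACE + 1] = {new OldSpaceSketch, new LargeObjectSpaceSketch};
  // The whole of the teardown: no per-space null checks or TearDown() calls.
  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
    delete space_[i];
    space_[i] = nullptr;
  }
}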

src/heap/heap.cc

@@ -336,9 +336,8 @@ bool Heap::CanExpandOldGeneration(size_t size) {
 }
 
 bool Heap::HasBeenSetUp() {
-  return old_space_ != nullptr && code_space_ != nullptr &&
-         map_space_ != nullptr && lo_space_ != nullptr &&
-         read_only_space_ != nullptr;
+  // We will always have a new space when the heap is set up.
+  return new_space_ != nullptr;
 }
 
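The one-pointer check works because Heap::SetUp() creates all spaces together, so any single space pointer can act as the "has been set up" sentinel. A contrived illustration of that invariant (hypothetical HeapSketch, not the real API):

#include <cassert>
#include <memory>

class HeapSketch {
 public:
  void SetUp() {
    // All spaces are allocated in one place, so checking one pointer
    // is equivalent to checking them all.
    new_space_ = std::make_unique<int>(0);
    old_space_ = std::make_unique<int>(0);
  }
  bool HasBeenSetUp() const { return new_space_ != nullptr; }

 private:
  std::unique_ptr<int> new_space_;
  std::unique_ptr<int> old_space_;
};

int main() {
  HeapSketch heap;
  assert(!heap.HasBeenSetUp());
  heap.SetUp();
  assert(heap.HasBeenSetUp());
}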
@@ -4717,23 +4716,16 @@ bool Heap::SetUp() {
   }
 
   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
-  if (!old_space_->SetUp()) return false;
-
   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
-  if (!code_space_->SetUp()) return false;
-
   space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
-  if (!map_space_->SetUp()) return false;
 
-  // The large object code space may contain code or data. We set the memory
+  // The large object space may contain code or data. We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
-  if (!lo_space_->SetUp()) return false;
 
   space_[RO_SPACE] = read_only_space_ =
       new ReadOnlySpace(this, RO_SPACE, NOT_EXECUTABLE);
-  if (!read_only_space_->SetUp()) return false;
 
   // Set up the seed that is used to randomize the string hash function.
   DCHECK_EQ(Smi::kZero, hash_seed());
@@ -5009,34 +5001,9 @@ void Heap::TearDown() {
   delete tracer_;
   tracer_ = nullptr;
 
-  new_space_->TearDown();
-  delete new_space_;
-  new_space_ = nullptr;
-
-  if (old_space_ != nullptr) {
-    delete old_space_;
-    old_space_ = nullptr;
-  }
-
-  if (code_space_ != nullptr) {
-    delete code_space_;
-    code_space_ = nullptr;
-  }
-
-  if (map_space_ != nullptr) {
-    delete map_space_;
-    map_space_ = nullptr;
-  }
-
-  if (lo_space_ != nullptr) {
-    lo_space_->TearDown();
-    delete lo_space_;
-    lo_space_ = nullptr;
-  }
-
-  if (read_only_space_ != nullptr) {
-    delete read_only_space_;
-    read_only_space_ = nullptr;
-  }
+  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+    delete space_[i];
+    space_[i] = nullptr;
+  }
 
   store_buffer()->TearDown();
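The loop can drop the old null checks because delete on a null pointer is defined to do nothing in C++; per-space cleanup now happens in the destructors instead. A tiny standalone illustration (hypothetical Probe type, not V8 code):

#include <cstdio>

struct Probe {
  ~Probe() { std::printf("deleted\n"); }
};

int main() {
  // One slot was never created; deleting it is still safe.
  Probe* spaces[3] = {new Probe, nullptr, new Probe};
  for (int i = 0; i < 3; i++) {
    delete spaces[i];  // no null check needed
    spaces[i] = nullptr;
  }
}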

src/heap/spaces-inl.h

@@ -468,10 +468,6 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
   return AllocateRaw(size_in_bytes, alignment);
 }
 
-size_t LargeObjectSpace::Available() {
-  return ObjectSizeFor(heap()->memory_allocator()->Available());
-}
-
 LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
   return LocalAllocationBuffer(

src/heap/spaces.cc

@@ -1443,13 +1443,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
   accounting_stats_.Clear();
 }
 
-bool PagedSpace::SetUp() { return true; }
-
-bool PagedSpace::HasBeenSetUp() { return true; }
-
 void PagedSpace::TearDown() {
   while (!memory_chunk_list_.Empty()) {
     MemoryChunk* chunk = memory_chunk_list_.front();
@@ -2258,6 +2251,10 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
   return true;
 }
 
+size_t LargeObjectSpace::Available() {
+  return ObjectSizeFor(heap()->memory_allocator()->Available());
+}
+
 void SpaceWithLinearArea::StartNextInlineAllocationStep() {
   if (heap()->allocation_step_in_progress()) {
     // If we are mid-way through an existing step, don't start a new one.
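Available() moves here from spaces-inl.h, and the declaration in spaces.h drops its inline specifier (see the header hunk below). Since Available() is a virtual override that is normally dispatched through the vtable, an out-of-line definition costs nothing. A compilable sketch of that shape, with hypothetical names:

#include <cstddef>
#include <cstdio>

class Space {
 public:
  virtual ~Space() = default;
  virtual size_t Available() = 0;
};

class LargeObjectSpaceSketch : public Space {
 public:
  size_t Available() override;  // no 'inline': defined out of line
};

// In the real patch this body moved from spaces-inl.h into spaces.cc.
size_t LargeObjectSpaceSketch::Available() {
  return 0;  // stands in for ObjectSizeFor(heap()->memory_allocator()->Available())
}

int main() {
  LargeObjectSpaceSketch lo;
  Space* s = &lo;
  std::printf("%zu\n", s->Available());  // virtual dispatch through Space*
}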
@@ -3244,12 +3241,6 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
       objects_size_(0),
       chunk_map_(1024) {}
 
-LargeObjectSpace::~LargeObjectSpace() {}
-
-bool LargeObjectSpace::SetUp() {
-  return true;
-}
-
 void LargeObjectSpace::TearDown() {
   while (!memory_chunk_list_.Empty()) {
     LargePage* page = first_page();
@@ -3259,7 +3250,6 @@ void LargeObjectSpace::TearDown() {
     memory_chunk_list_.Remove(page);
     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
-  SetUp();
 }
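The trailing SetUp() call existed so a torn-down LargeObjectSpace was immediately reusable; now that TearDown() runs from the destructor (see the spaces.h hunk below), the object is never used again afterwards and the reset is pointless. A minimal sketch of the destructor-driven variant, assuming a hypothetical chunk list:

#include <cstdio>

struct Chunk {
  Chunk* next = nullptr;
};

class LargeObjectSpaceSketch {
 public:
  ~LargeObjectSpaceSketch() { TearDown(); }  // drained exactly once, at death
  void Add(Chunk* c) {
    c->next = head_;
    head_ = c;
  }
  void TearDown() {
    while (head_ != nullptr) {
      Chunk* c = head_;
      head_ = c->next;
      delete c;  // the real code frees pages via the memory allocator
    }
    // No trailing SetUp(): the space is not reused after teardown.
  }

 private:
  Chunk* head_ = nullptr;
};

int main() {
  LargeObjectSpaceSketch space;
  space.Add(new Chunk);
  space.Add(new Chunk);
  std::printf("scope ends; destructor tears down\n");
}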

src/heap/spaces.h

@@ -2038,16 +2038,6 @@ class V8_EXPORT_PRIVATE PagedSpace
   ~PagedSpace() override { TearDown(); }
 
-  // Set up the space using the given address range of virtual memory (from
-  // the memory allocator's initial chunk) if possible. If the block of
-  // addresses is not big enough to contain a single page-aligned page, a
-  // fresh chunk will be allocated.
-  bool SetUp();
-
-  // Returns true if the space has been successfully set up and not
-  // subsequently torn down.
-  bool HasBeenSetUp();
-
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   inline bool Contains(Object* o);
@@ -2366,7 +2356,6 @@ class SemiSpace : public Space {
   void SetUp(size_t initial_capacity, size_t maximum_capacity);
   void TearDown();
-  bool HasBeenSetUp() { return maximum_capacity_ != 0; }
 
   bool Commit();
   bool Uncommit();
@@ -2548,6 +2537,8 @@ class NewSpace : public SpaceWithLinearArea {
         from_space_(heap, kFromSpace),
         reservation_() {}
 
+  ~NewSpace() override { TearDown(); }
+
   inline bool Contains(HeapObject* o);
   inline bool ContainsSlow(Address a);
   inline bool Contains(Object* o);
@@ -2558,11 +2549,6 @@ class NewSpace : public SpaceWithLinearArea {
   // is not deallocated here.
   void TearDown();
 
-  // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() {
-    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
-  }
-
   // Flip the pair of spaces.
   void Flip();
@@ -2934,10 +2920,7 @@ class LargeObjectSpace : public Space {
   typedef LargePageIterator iterator;
 
   LargeObjectSpace(Heap* heap, AllocationSpace id);
-  virtual ~LargeObjectSpace();
-
-  // Initializes internal data structures.
-  bool SetUp();
+  ~LargeObjectSpace() override { TearDown(); }
 
   // Releases internal resources, frees objects in this space.
   void TearDown();
@@ -2953,7 +2936,7 @@
                                  Executability executable);
 
   // Available bytes for objects in this space.
-  inline size_t Available() override;
+  size_t Available() override;
 
   size_t Size() override { return size_; }
   size_t SizeOfObjects() override { return objects_size_; }

test/cctest/heap/test-spaces.cc

@@ -257,7 +257,7 @@ TEST(NewSpace) {
   CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
                         CcTest::heap()->InitialSemiSpaceSize()));
-  CHECK(new_space.HasBeenSetUp());
+  CHECK(new_space.MaximumCapacity());
 
   while (new_space.Available() >= kMaxRegularHeapObjectSize) {
     CHECK(new_space.Contains(
@@ -282,8 +282,6 @@ TEST(OldSpace) {
   OldSpace* s = new OldSpace(heap);
   CHECK_NOT_NULL(s);
 
-  CHECK(s->SetUp());
-
   while (s->Available() > 0) {
     s->AllocateRawUnaligned(kMaxRegularHeapObjectSize).ToObjectChecked();
   }

test/unittests/heap/spaces-unittest.cc

@@ -20,7 +20,6 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
   CompactionSpace* compaction_space =
       new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
   EXPECT_TRUE(compaction_space != NULL);
-  EXPECT_TRUE(compaction_space->SetUp());
 
   for (Page* p : *old_space) {
     // Unlink free lists from the main space to avoid reusing the memory for