[heap] Reland prepare code for smaller large object allocation limit than max allocatable memory.
BUG=
Review URL: https://codereview.chromium.org/1393833002
Cr-Commit-Position: refs/heads/master@{#31136}
parent 74ae226b94
commit 5d125f218a
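
What the reland actually does: Page::kAllocatableMemory is introduced as a second name for kPageSize - kObjectStartOffset and takes over every use that means "usable bytes per page", while Page::kMaxRegularHeapObjectSize keeps the uses that mean "largest object allowed outside large object space". The two constants are still equal, so the change is behavior-neutral; it only decouples the two meanings so a follow-up can lower the object limit independently. In the same spirit, kInitialMaxFastElementArray moves from JSObject to JSArray, the class it actually constrains. A condensed view of the constants after the patch (sketch assembled from the hunks below; kObjectStartOffset is build-dependent):

    class Page : public MemoryChunk {
     public:
      static const int kPageSize = 1 << kPageSizeBits;
      // Policy limit: anything larger goes to large object space. A follow-up
      // may make this smaller than kAllocatableMemory.
      static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
      // Capacity fact: bytes a regular page can actually hand out.
      static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
    };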
@@ -1215,7 +1215,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
   // trigger it.
   HValue* length = GetArgumentsLength();
   HConstant* max_alloc_length =
-      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+      Add<HConstant>(JSArray::kInitialMaxFastElementArray);
   HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
 
   // We need to fill with the hole if it's a smi array in the multi-argument
@@ -1198,7 +1198,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
   // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
   if ((flags & ArrayLiteral::kShallowElements) != 0 &&
       (flags & ArrayLiteral::kIsStrong) == 0 &&
-      length < JSObject::kInitialMaxFastElementArray) {
+      length < JSArray::kInitialMaxFastElementArray) {
     Isolate* isolate = jsgraph()->isolate();
     Callable callable = CodeFactory::FastCloneShallowArray(isolate);
     CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2170,7 +2170,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
 
   // Optimize the case where there is one argument and the argument is a small
   // smi.
-  if (length > 0 && length < JSObject::kInitialMaxFastElementArray) {
+  if (length > 0 && length < JSArray::kInitialMaxFastElementArray) {
     ElementsKind elements_kind = array->GetElementsKind();
     JSArray::Initialize(array, length, length);
@@ -158,7 +158,7 @@ bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
     ArrayLiteral* expr) const {
   // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
   return expr->depth() > 1 || expr->is_strong() ||
-         expr->values()->length() > JSObject::kInitialMaxFastElementArray;
+         expr->values()->length() > JSArray::kInitialMaxFastElementArray;
 }
 
@@ -4718,7 +4718,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
-          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+          FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));
 
   code_range_size_ = code_range_size * MB;
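
A back-of-the-envelope check of the invariant this DCHECK asserts (assuming a 64-bit build with 8-byte pointers, a 16-byte FixedArray header, and V8's 1 MB pages; the exact values are build-dependent):

    // Illustrative arithmetic only, not part of the patch:
    //   FixedArray::SizeFor(n)      == FixedArray::kHeaderSize + n * kPointerSize
    //   FixedArray::SizeFor(100000) == 16 + 100000 * 8 == 800016 bytes
    // JSArray::kSize and AllocationMemento::kSize add only a few dozen bytes,
    // which stays well below kMaxRegularHeapObjectSize
    // (kPageSize - kObjectStartOffset, i.e. just under 1 MB), so a maximal
    // fast-elements array still fits on one regular page.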
@@ -188,7 +188,7 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
   page->mutex_ = new base::Mutex();
-  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+  DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
   owner->IncreaseCapacity(page->area_size());
   owner->Free(page->area_start(), page->area_size());
@@ -814,12 +814,15 @@ class Page : public MemoryChunk {
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // Maximum object size that fits in a page. Objects larger than that size
-  // are allocated in large object space and are never moved in memory. This
-  // also applies to new space allocation, since objects are never migrated
-  // from new space to large object space. Takes double alignment into account.
+  // Maximum object size that gets allocated into regular pages. Objects larger
+  // than that size are allocated in large object space and are never moved in
+  // memory. This also applies to new space allocation, since objects are never
+  // migrated from new space to large object space. Takes double alignment into
+  // account.
   static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
 
+  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
+
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
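
The duplication is the point: today both constants equal kPageSize - kObjectStartOffset, but they answer different questions. kAllocatableMemory is a property of the page (how many bytes it can hand out); kMaxRegularHeapObjectSize is an allocation policy (how large an object may be before it must go to large object space). A hedged sketch of the routing decision this separation enables; the helper is hypothetical, and V8's real allocation path is spread across the spaces:

    AllocationResult AllocateObject(Heap* heap, int size_in_bytes) {
      if (size_in_bytes > Page::kMaxRegularHeapObjectSize) {
        // Over the policy limit: large object space, never moved.
        return heap->lo_space()->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
      }
      // Within the policy limit: a regular page, which can serve at most
      // Page::kAllocatableMemory bytes in total.
      return heap->old_space()->AllocateRawUnaligned(size_in_bytes);
    }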
@@ -1169,7 +1172,7 @@ class MemoryAllocator {
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
   }
 
   // Returns an indication of whether a pointer is in a space that has
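
MaxAvailable() is per-page bookkeeping, not an object-size question, hence the switch to kAllocatableMemory. A worked instance with assumed numbers (1 MB pages):

    // Sketch: with 10 MB available the allocator has 10 whole pages, and each
    // page serves only kAllocatableMemory bytes (page size minus the object
    // start offset), so the usable total is slightly under 10 MB.
    intptr_t available = 10 * Page::kPageSize;
    intptr_t pages = available / Page::kPageSize;        // == 10
    intptr_t usable = pages * Page::kAllocatableMemory;  // < available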
@@ -1245,7 +1248,7 @@ class MemoryAllocator {
   static int PageAreaSize(AllocationSpace space) {
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kMaxRegularHeapObjectSize;
+                                 : Page::kAllocatableMemory;
   }
 
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
@@ -1700,7 +1703,7 @@ class FreeList {
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+  static const int kMaxBlockSize = Page::kAllocatableMemory;
 
   static const int kSmallListMin = 0x1f * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
@@ -2120,7 +2123,7 @@ class NewSpacePage : public MemoryChunk {
       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
       (1 << MemoryChunk::SCAN_ON_SCAVENGE);
 
-  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+  static const int kAreaSize = Page::kAllocatableMemory;
 
   inline NewSpacePage* next_page() {
     return static_cast<NewSpacePage*>(next_chunk());
@@ -2839,7 +2842,7 @@ class MapSpace : public PagedSpace {
   virtual void VerifyObject(HeapObject* obj);
 
  private:
-  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
@@ -1852,7 +1852,7 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
                                                   HValue* index,
                                                   HValue* input) {
   NoObservableSideEffectsScope scope(this);
-  HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+  HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
   Add<HBoundsCheck>(length, max_length);
 
   // Generate size calculation code here in order to make it dominate
@@ -2658,7 +2658,7 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
 
   HValue* constant_zero = graph()->GetConstant0();
   HConstant* max_alloc_length =
-      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+      Add<HConstant>(JSArray::kInitialMaxFastElementArray);
   HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
                                                    max_alloc_length);
   IfBuilder if_builder(this);
@@ -3128,10 +3128,10 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
 
   // This function implicitly relies on the fact that the
   // FastCloneShallowArrayStub is called only for literals shorter than
-  // JSObject::kInitialMaxFastElementArray.
+  // JSArray::kInitialMaxFastElementArray.
   // Can't add HBoundsCheck here because otherwise the stub will eager a frame.
   HConstant* size_upper_bound = EstablishElementsAllocationSize(
-      kind, JSObject::kInitialMaxFastElementArray);
+      kind, JSArray::kInitialMaxFastElementArray);
   elements->set_size_upper_bound(size_upper_bound);
 
   Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
@@ -2366,10 +2366,6 @@ class JSObject: public JSReceiver {
   // don't want to be wasteful with long lived objects.
   static const int kMaxUncheckedOldFastElementsLength = 500;
 
-  // Note that Page::kMaxRegularHeapObjectSize puts a limit on
-  // permissible values (see the DCHECK in heap.cc).
-  static const int kInitialMaxFastElementArray = 100000;
-
   // This constant applies only to the initial map of "global.Object" and
   // not to arbitrary other JSObject maps.
   static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
@@ -10042,6 +10038,10 @@ class JSArray: public JSObject {
   static const int kLengthOffset = JSObject::kHeaderSize;
   static const int kSize = kLengthOffset + kPointerSize;
 
+  // Note that Page::kMaxRegularHeapObjectSize puts a limit on
+  // permissible values (see the DCHECK in heap.cc).
+  static const int kInitialMaxFastElementArray = 100000;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
 };
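
Moving the constant is a pure relocation: the value (100000) and its comment travel unchanged from JSObject to JSArray, and every call site in the hunks above switches its qualifier. Since the limit only ever constrains array element backing stores, the narrower home is the honest one. A hypothetical helper showing the post-move spelling:

    static bool FitsFastElementLimit(int length) {
      // Only the qualifier changed; the numeric value is still 100000.
      return length < JSArray::kInitialMaxFastElementArray;
    }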
@@ -253,7 +253,7 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
       can_use_type_feedback = false;
     } else if (value != 0) {
       holey = true;
-      if (value >= JSObject::kInitialMaxFastElementArray) {
+      if (value >= JSArray::kInitialMaxFastElementArray) {
         can_inline_array_constructor = false;
       }
     }
@@ -530,16 +530,61 @@ static inline void DisableInlineAllocationSteps(v8::internal::NewSpace* space) {
 }
 
 
+static int LenFromSize(int size) {
+  return (size - i::FixedArray::kHeaderSize) / i::kPointerSize;
+}
+
+
+static inline void CreatePadding(i::Heap* heap, int padding_size,
+                                 i::PretenureFlag tenure) {
+  const int max_number_of_objects = 20;
+  v8::internal::Handle<v8::internal::FixedArray>
+      big_objects[max_number_of_objects];
+  i::Isolate* isolate = heap->isolate();
+  int allocate_memory;
+  int length;
+  int free_memory = padding_size;
+  if (tenure == i::TENURED) {
+    int current_free_memory =
+        static_cast<int>(*heap->old_space()->allocation_limit_address() -
+                         *heap->old_space()->allocation_top_address());
+    CHECK(padding_size <= current_free_memory || current_free_memory == 0);
+  } else {
+    DisableInlineAllocationSteps(heap->new_space());
+    int current_free_memory =
+        static_cast<int>(*heap->new_space()->allocation_limit_address() -
+                         *heap->new_space()->allocation_top_address());
+    CHECK(padding_size <= current_free_memory || current_free_memory == 0);
+  }
+  for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
+    if (free_memory > i::Page::kMaxRegularHeapObjectSize) {
+      allocate_memory = i::Page::kMaxRegularHeapObjectSize;
+      length = LenFromSize(allocate_memory);
+    } else {
+      allocate_memory = free_memory;
+      length = LenFromSize(allocate_memory);
+      if (length <= 0) {
+        // Not enough room to create another fixed array. Let's create a filler.
+        heap->CreateFillerObjectAt(*heap->old_space()->allocation_top_address(),
+                                   free_memory);
+        break;
+      }
+    }
+    big_objects[i] = isolate->factory()->NewFixedArray(length, tenure);
+    CHECK((tenure == i::NOT_TENURED && heap->InNewSpace(*big_objects[i])) ||
+          (tenure == i::TENURED && heap->InOldSpace(*big_objects[i])));
+    free_memory -= allocate_memory;
+  }
+}
+
+
 // Helper function that simulates a full new-space in the heap.
 static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
   DisableInlineAllocationSteps(space);
   v8::internal::AllocationResult allocation = space->AllocateRawUnaligned(
       v8::internal::Page::kMaxRegularHeapObjectSize);
   if (allocation.IsRetry()) return false;
   v8::internal::HeapObject* free_space = NULL;
   CHECK(allocation.To(&free_space));
   space->heap()->CreateFillerObjectAt(
       free_space->address(), v8::internal::Page::kMaxRegularHeapObjectSize);
+  int space_remaining = static_cast<int>(*space->allocation_limit_address() -
+                                         *space->allocation_top_address());
+  if (space_remaining == 0) return false;
+  CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
   return true;
 }
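
Why padding becomes a helper instead of one raw allocation: once the regular-object limit may be smaller than a page, a page's worth of padding can no longer be a single object. CreatePadding therefore carves the requested size into at most 20 fixed arrays of at most Page::kMaxRegularHeapObjectSize each. A simplified model of its splitting strategy (names as above; illustration only):

    int free_memory = padding_size;
    while (free_memory > 0) {
      int chunk = i::Min(free_memory, i::Page::kMaxRegularHeapObjectSize);
      int length = LenFromSize(chunk);  // elements that fit in `chunk`
      if (length <= 0) break;           // too small: leave it as a filler
      isolate->factory()->NewFixedArray(length, tenure);
      free_memory -= chunk;
    }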
@@ -553,11 +598,7 @@ static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
   CHECK(space_remaining >= extra_bytes);
   int new_linear_size = space_remaining - extra_bytes;
   if (new_linear_size == 0) return;
-  v8::internal::AllocationResult allocation =
-      space->AllocateRawUnaligned(new_linear_size);
-  v8::internal::HeapObject* free_space = NULL;
-  CHECK(allocation.To(&free_space));
-  space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
+  CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
 }
 
@@ -6547,7 +6547,7 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
     }
     // We are relying on this creating a big flag array and reserving the space
     // up front.
-    v8::Handle<Value> big_array = CompileRun("new Array(50000)");
+    v8::Handle<Value> big_array = CompileRun("new Array(5000)");
     a->Set(v8_str("y"), big_array);
     big_heap_size = CcTest::heap()->SizeOfObjects();
   }
@@ -6569,7 +6569,7 @@
   }
   // A single GC should be enough to reclaim the memory, since we are using
   // phantom handles.
-  CHECK_LT(CcTest::heap()->SizeOfObjects(), big_heap_size - 200000);
+  CHECK_LT(CcTest::heap()->SizeOfObjects(), big_heap_size - 20000);
   CHECK(object_a.flag);
   CHECK(object_b.flag);
 }
@@ -5565,8 +5565,8 @@ TEST(ArrayShiftSweeping) {
   Heap* heap = isolate->heap();
 
   v8::Local<v8::Value> result = CompileRun(
-      "var array = new Array(40000);"
-      "var tmp = new Array(100000);"
+      "var array = new Array(400);"
+      "var tmp = new Array(1000);"
       "array[0] = 10;"
       "gc();"
       "gc();"
@@ -5638,16 +5638,8 @@ UNINITIALIZED_TEST(PromotionQueue) {
     heap->CollectGarbage(NEW_SPACE);
     CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
 
-    // Create the first huge object which will exactly fit the first semi-space
-    // page.
-    DisableInlineAllocationSteps(new_space);
-    int new_linear_size =
-        static_cast<int>(*heap->new_space()->allocation_limit_address() -
-                         *heap->new_space()->allocation_top_address());
-    int length = (new_linear_size - FixedArray::kHeaderSize) / kPointerSize;
-    Handle<FixedArray> first =
-        i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
-    CHECK(heap->InNewSpace(*first));
+    // Fill-up the first semi-space page.
+    FillUpOnePage(new_space);
 
     // Create a small object to initialize the bump pointer on the second
     // semi-space page.
@@ -5655,17 +5647,8 @@
         i_isolate->factory()->NewFixedArray(1, NOT_TENURED);
     CHECK(heap->InNewSpace(*small));
 
-    // Create the second huge object of maximum allocatable second semi-space
-    // page size.
-    DisableInlineAllocationSteps(new_space);
-    new_linear_size =
-        static_cast<int>(*heap->new_space()->allocation_limit_address() -
-                         *heap->new_space()->allocation_top_address());
-    length = (new_linear_size - FixedArray::kHeaderSize) / kPointerSize;
-    Handle<FixedArray> second =
-        i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
-    CHECK(heap->InNewSpace(*second));
+    // Fill-up the second semi-space page.
+    FillUpOnePage(new_space);
 
     // This scavenge will corrupt memory if the promotion queue is not
     // evacuated.
@@ -5691,19 +5674,11 @@ TEST(Regress388880) {
 
   int desired_offset = Page::kPageSize - map1->instance_size();
 
-  // Allocate fixed array in old pointer space so, that object allocated
+  // Allocate padding objects in old pointer space so, that object allocated
   // afterwards would end at the end of the page.
-  {
-    SimulateFullSpace(heap->old_space());
-    int padding_size = desired_offset - Page::kObjectStartOffset;
-    int padding_array_length =
-        (padding_size - FixedArray::kHeaderSize) / kPointerSize;
-
-    Handle<FixedArray> temp2 =
-        factory->NewFixedArray(padding_array_length, TENURED);
-    Page* page = Page::FromAddress(temp2->address());
-    CHECK_EQ(Page::kObjectStartOffset, page->Offset(temp2->address()));
-  }
+  SimulateFullSpace(heap->old_space());
+  int padding_size = desired_offset - Page::kObjectStartOffset;
+  CreatePadding(heap, padding_size, TENURED);
 
   Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
   o->set_properties(*factory->empty_fixed_array());
@@ -426,7 +426,8 @@ TEST(CompactionSpace) {
   // Cannot loop until "Available()" since we initially have 0 bytes available
   // and would thus neither grow, nor be able to allocate an object.
   const int kNumObjects = 100;
-  const int kExpectedPages = kNumObjects;
+  const int kExpectedPages = (kNumObjects / (compaction_space->AreaSize() /
+                                             Page::kMaxRegularHeapObjectSize));
   for (int i = 0; i < kNumObjects; i++) {
     compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
         .ToObjectChecked();
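
The rewritten expectation keeps this test honest under either regime: every allocated object has the maximum regular size, so the number of objects per page is AreaSize() divided by that maximum. A worked instance (values follow from the constants in this patch, which are still equal):

    int objects_per_page = compaction_space->AreaSize() /
                           Page::kMaxRegularHeapObjectSize;  // == 1 today
    int expected_pages = kNumObjects / objects_per_page;     // == 100 today
    // If the regular-object limit later drops to half the page area,
    // objects_per_page becomes 2 and only 50 pages are expected.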
@@ -188,8 +188,7 @@ TEST(Regress2060a) {
 
   // Start second old-space page so that values land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
-  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
+  SimulateFullSpace(heap->old_space());
 
   // Fill up weak map with values on an evacuation candidate.
   {
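
The same two-line substitution repeats in the next three hunks (Regress2060b and both WeakSet variants). The old idiom hard-wired the page geometry into a magic number; SimulateFullSpace states the actual intent, presumably without assuming one object may span nearly a whole page:

    // Old, geometry-dependent:
    //   int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
    //   factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
    // New, geometry-agnostic: exhaust the current page so the next
    // allocations start a fresh one.
    SimulateFullSpace(heap->old_space());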
@@ -228,8 +227,7 @@ TEST(Regress2060b) {
 
   // Start second old-space page so that keys land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
-  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
+  SimulateFullSpace(heap->old_space());
 
   // Fill up weak map with keys on an evacuation candidate.
   Handle<JSObject> keys[32];
@@ -187,8 +187,7 @@ TEST(WeakSet_Regress2060a) {
 
   // Start second old-space page so that values land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
-  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
+  SimulateFullSpace(heap->old_space());
 
   // Fill up weak set with values on an evacuation candidate.
   {
@@ -227,8 +226,7 @@ TEST(WeakSet_Regress2060b) {
 
   // Start second old-space page so that keys land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
-  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
+  SimulateFullSpace(heap->old_space());
 
   // Fill up weak set with keys on an evacuation candidate.
   Handle<JSObject> keys[32];