Revert of [heap] Prepare code for smaller large object allocation limit than max allocatable memory. (patchset #10 id:180001 of https://codereview.chromium.org/1361853005/ )
Reason for revert:
[Sheriff] Need to revert in order to revert https://codereview.chromium.org/1358703003/

Original issue's description:
> [heap] Prepare heap for smaller large object allocation limit than max allocatable memory.
>
> BUG=chromium:524425
> LOG=n
>
> Committed: https://crrev.com/c2bce747993c445daf78975392e587bff20c6677
> Cr-Commit-Position: refs/heads/master@{#31107}

TBR=mlippautz@chromium.org,mstarzinger@chromium.org,hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1376413005
Cr-Commit-Position: refs/heads/master@{#31129}
parent 2e7077e02c
commit 9af0174f03
@@ -1215,7 +1215,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
   // trigger it.
   HValue* length = GetArgumentsLength();
   HConstant* max_alloc_length =
-      Add<HConstant>(JSArray::kInitialMaxFastElementArray);
+      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
   HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
 
   // We need to fill with the hole if it's a smi array in the multi-argument
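All of the compiler-side hunks in this CL guard the same invariant: a requested element count is bounds-checked against kInitialMaxFastElementArray before the inline fast-path allocation is emitted; the revert only changes which class hosts the constant. A minimal standalone C++ sketch of that guard (not V8 code; only the 100000 limit is taken from the objects.h hunk below, everything else is illustrative):

#include <cstdio>

// Limit value taken from the objects.h hunk in this CL; whether JSObject or
// JSArray hosts it is exactly what this revert switches back.
static const int kInitialMaxFastElementArray = 100000;

// Illustrative stand-in for the HBoundsCheck the graph builder emits:
// lengths under the limit may take the inline fast path, anything larger
// must fall back to the runtime.
bool CanUseFastPath(int length) {
  return length >= 0 && length < kInitialMaxFastElementArray;
}

int main() {
  std::printf("99999  -> %s\n", CanUseFastPath(99999) ? "fast path" : "runtime");
  std::printf("100000 -> %s\n", CanUseFastPath(100000) ? "fast path" : "runtime");
  return 0;
}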
@@ -1211,7 +1211,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
   // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
   if ((flags & ArrayLiteral::kShallowElements) != 0 &&
       (flags & ArrayLiteral::kIsStrong) == 0 &&
-      length < JSArray::kInitialMaxFastElementArray) {
+      length < JSObject::kInitialMaxFastElementArray) {
     Isolate* isolate = jsgraph()->isolate();
     Callable callable = CodeFactory::FastCloneShallowArray(isolate);
     CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2170,7 +2170,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
 
   // Optimize the case where there is one argument and the argument is a small
   // smi.
-  if (length > 0 && length < JSArray::kInitialMaxFastElementArray) {
+  if (length > 0 && length < JSObject::kInitialMaxFastElementArray) {
     ElementsKind elements_kind = array->GetElementsKind();
     JSArray::Initialize(array, length, length);
 
@@ -158,7 +158,7 @@ bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
     ArrayLiteral* expr) const {
   // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
   return expr->depth() > 1 || expr->is_strong() ||
-         expr->values()->length() > JSArray::kInitialMaxFastElementArray;
+         expr->values()->length() > JSObject::kInitialMaxFastElementArray;
 }
 
 
@@ -4744,7 +4744,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
-          FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
+          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));
 
   code_range_size_ = code_range_size * MB;
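The DCHECK above is the invariant that ties the two constants together: a maximally sized fast-elements JSArray, its backing FixedArray, and an AllocationMemento must still fit in one regular page. A rough standalone check of that arithmetic (header and object sizes here are assumed illustrative 64-bit values, not the real V8 ones; only the 100000 limit comes from this CL):

#include <cassert>
#include <cstdio>

int main() {
  const int kPointerSize = 8;           // assumed 64-bit build
  const int kPageSizeBits = 20;         // assumed 1 MB pages
  const int kObjectStartOffset = 112;   // hypothetical page header size
  const int kMaxRegularHeapObjectSize = (1 << kPageSizeBits) - kObjectStartOffset;

  const int kInitialMaxFastElementArray = 100000;      // from this CL
  const int kJSArraySize = 4 * kPointerSize;           // hypothetical
  const int kFixedArrayHeaderSize = 2 * kPointerSize;  // hypothetical
  const int kAllocationMementoSize = 2 * kPointerSize; // hypothetical

  // FixedArray::SizeFor(n) is the header plus n tagged slots.
  int backing_store = kFixedArrayHeaderSize + kInitialMaxFastElementArray * kPointerSize;
  int total = kJSArraySize + backing_store + kAllocationMementoSize;
  std::printf("total = %d bytes, page area = %d bytes\n", total, kMaxRegularHeapObjectSize);
  assert(total <= kMaxRegularHeapObjectSize);  // mirrors the DCHECK above
  return 0;
}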
@@ -188,7 +188,7 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
   page->mutex_ = new base::Mutex();
-  DCHECK(page->area_size() <= kAllocatableMemory);
+  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
   DCHECK(chunk->owner() == owner);
   owner->IncreaseCapacity(page->area_size());
   owner->Free(page->area_start(), page->area_size());
@@ -814,15 +814,12 @@ class Page : public MemoryChunk {
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // Maximum object size that gets allocated into regular pages. Objects larger
-  // than that size are allocated in large object space and are never moved in
-  // memory. This also applies to new space allocation, since objects are never
-  // migrated from new space to large object space. Takes double alignment into
-  // account.
+  // Maximum object size that fits in a page. Objects larger than that size
+  // are allocated in large object space and are never moved in memory. This
+  // also applies to new space allocation, since objects are never migrated
+  // from new space to large object space. Takes double alignment into account.
   static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
 
-  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
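The hunk above collapses the short-lived Page::kAllocatableMemory alias back into Page::kMaxRegularHeapObjectSize. Both were defined as the page size minus the object start offset, which is why every kAllocatableMemory use in the following hunks can be swapped back without changing any computed size. A minimal sketch of that arithmetic (bit width and offset are assumed, not the real V8 values):

#include <cstdio>

int main() {
  const int kPageSizeBits = 20;        // assumed; the real value lives elsewhere in V8
  const int kPageSize = 1 << kPageSizeBits;
  const int kObjectStartOffset = 112;  // hypothetical chunk-header size

  // Before this revert the Page class carried both names for the same value:
  const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
  const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  std::printf("%d == %d\n", kMaxRegularHeapObjectSize, kAllocatableMemory);
  return 0;
}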
@@ -1172,7 +1169,7 @@ class MemoryAllocator {
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
+    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }
 
   // Returns an indication of whether a pointer is in a space that has
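MaxAvailable() rounds the raw byte count down to whole pages and then scales by the usable area per page, since the per-page header cannot hold objects. A hedged sketch of the same computation (page size and offset assumed as above):

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kPageSize = 1 << 20;                          // assumed 1 MB pages
  const intptr_t kMaxRegularHeapObjectSize = kPageSize - 112;  // hypothetical offset

  intptr_t available = 10 * kPageSize + 12345;  // example raw byte count
  // Whole pages only; the remainder cannot back a page, and each page
  // loses its header bytes to bookkeeping.
  intptr_t max_available = (available / kPageSize) * kMaxRegularHeapObjectSize;
  std::printf("%jd usable of %jd raw\n",
              static_cast<intmax_t>(max_available), static_cast<intmax_t>(available));
  return 0;
}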
@@ -1248,7 +1245,7 @@ class MemoryAllocator {
   static int PageAreaSize(AllocationSpace space) {
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kAllocatableMemory;
+                                 : Page::kMaxRegularHeapObjectSize;
   }
 
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
@@ -1703,7 +1700,7 @@ class FreeList {
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kAllocatableMemory;
+  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
 
   static const int kSmallListMin = 0x1f * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
@@ -2123,7 +2120,7 @@ class NewSpacePage : public MemoryChunk {
       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
       (1 << MemoryChunk::SCAN_ON_SCAVENGE);
 
-  static const int kAreaSize = Page::kAllocatableMemory;
+  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
 
   inline NewSpacePage* next_page() {
     return static_cast<NewSpacePage*>(next_chunk());
@@ -2842,7 +2839,7 @@ class MapSpace : public PagedSpace {
   virtual void VerifyObject(HeapObject* obj);
 
  private:
-  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
+  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
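kMapsPerPage is plain integer division: how many fixed-size Map objects fit in one page's usable area. A sketch with an assumed Map size (the real Map::kSize depends on the build configuration):

#include <cstdio>

int main() {
  const int kUsablePageArea = (1 << 20) - 112;  // hypothetical, as in the sketches above
  const int kMapSize = 88;                      // assumed; not the real Map::kSize
  const int kMapsPerPage = kUsablePageArea / kMapSize;  // integer division; remainder is wasted
  std::printf("maps per page: %d\n", kMapsPerPage);
  return 0;
}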
@@ -1852,7 +1852,7 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
                                                   HValue* index,
                                                   HValue* input) {
   NoObservableSideEffectsScope scope(this);
-  HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
+  HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray);
   Add<HBoundsCheck>(length, max_length);
 
   // Generate size calculation code here in order to make it dominate
@@ -2658,7 +2658,7 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
 
   HValue* constant_zero = graph()->GetConstant0();
   HConstant* max_alloc_length =
-      Add<HConstant>(JSArray::kInitialMaxFastElementArray);
+      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
   HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
                                                    max_alloc_length);
   IfBuilder if_builder(this);
@@ -3128,10 +3128,10 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
 
   // This function implicitly relies on the fact that the
   // FastCloneShallowArrayStub is called only for literals shorter than
-  // JSArray::kInitialMaxFastElementArray.
+  // JSObject::kInitialMaxFastElementArray.
   // Can't add HBoundsCheck here because otherwise the stub will eager a frame.
   HConstant* size_upper_bound = EstablishElementsAllocationSize(
-      kind, JSArray::kInitialMaxFastElementArray);
+      kind, JSObject::kInitialMaxFastElementArray);
   elements->set_size_upper_bound(size_upper_bound);
 
   Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
@@ -2366,6 +2366,10 @@ class JSObject: public JSReceiver {
   // don't want to be wasteful with long lived objects.
   static const int kMaxUncheckedOldFastElementsLength = 500;
 
+  // Note that Page::kMaxRegularHeapObjectSize puts a limit on
+  // permissible values (see the DCHECK in heap.cc).
+  static const int kInitialMaxFastElementArray = 100000;
+
   // This constant applies only to the initial map of "global.Object" and
   // not to arbitrary other JSObject maps.
   static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
@@ -10032,10 +10036,6 @@ class JSArray: public JSObject {
   static const int kLengthOffset = JSObject::kHeaderSize;
   static const int kSize = kLengthOffset + kPointerSize;
 
-  // Note that Page::kMaxRegularHeapObjectSize puts a limit on
-  // permissible values (see the DCHECK in heap.cc).
-  static const int kInitialMaxFastElementArray = 100000;
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
 };
@@ -253,7 +253,7 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
       can_use_type_feedback = false;
     } else if (value != 0) {
       holey = true;
-      if (value >= JSArray::kInitialMaxFastElementArray) {
+      if (value >= JSObject::kInitialMaxFastElementArray) {
         can_inline_array_constructor = false;
       }
     }
@@ -530,61 +530,16 @@ static inline void DisableInlineAllocationSteps(v8::internal::NewSpace* space) {
 }
 
 
-static int LenFromSize(int size) {
-  return (size - i::FixedArray::kHeaderSize) / i::kPointerSize;
-}
-
-
-static inline void CreatePadding(i::Heap* heap, int padding_size,
-                                 i::PretenureFlag tenure) {
-  const int max_number_of_objects = 20;
-  v8::internal::Handle<v8::internal::FixedArray>
-      big_objects[max_number_of_objects];
-  i::Isolate* isolate = heap->isolate();
-  int allocate_memory;
-  int length;
-  int free_memory = padding_size;
-  if (tenure == i::TENURED) {
-    int current_free_memory =
-        static_cast<int>(*heap->old_space()->allocation_limit_address() -
-                         *heap->old_space()->allocation_top_address());
-    CHECK(padding_size <= current_free_memory || current_free_memory == 0);
-  } else {
-    DisableInlineAllocationSteps(heap->new_space());
-    int current_free_memory =
-        static_cast<int>(*heap->new_space()->allocation_limit_address() -
-                         *heap->new_space()->allocation_top_address());
-    CHECK(padding_size <= current_free_memory || current_free_memory == 0);
-  }
-  for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
-    if (free_memory > i::Page::kMaxRegularHeapObjectSize) {
-      allocate_memory = i::Page::kMaxRegularHeapObjectSize;
-      length = LenFromSize(allocate_memory);
-    } else {
-      allocate_memory = free_memory;
-      length = LenFromSize(allocate_memory);
-      if (length <= 0) {
-        // Not enough room to create another fixed array. Let's create a filler.
-        heap->CreateFillerObjectAt(*heap->old_space()->allocation_top_address(),
-                                   free_memory);
-        break;
-      }
-    }
-    big_objects[i] = isolate->factory()->NewFixedArray(length, tenure);
-    CHECK((tenure == i::NOT_TENURED && heap->InNewSpace(*big_objects[i])) ||
-          (tenure == i::TENURED && heap->InOldSpace(*big_objects[i])));
-    free_memory -= allocate_memory;
-  }
-}
-
-
 // Helper function that simulates a full new-space in the heap.
 static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
   DisableInlineAllocationSteps(space);
-  int space_remaining = static_cast<int>(*space->allocation_limit_address() -
-                                         *space->allocation_top_address());
-  if (space_remaining == 0) return false;
-  CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
+  v8::internal::AllocationResult allocation = space->AllocateRawUnaligned(
+      v8::internal::Page::kMaxRegularHeapObjectSize);
+  if (allocation.IsRetry()) return false;
+  v8::internal::HeapObject* free_space = NULL;
+  CHECK(allocation.To(&free_space));
+  space->heap()->CreateFillerObjectAt(
      free_space->address(), v8::internal::Page::kMaxRegularHeapObjectSize);
   return true;
 }
 
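The LenFromSize helper deleted above inverts FixedArray's size formula: an array of length n occupies kHeaderSize + n * kPointerSize bytes, so the largest length fitting in a given byte budget is the integer quotient. A standalone round-trip check (header and pointer sizes are assumed 64-bit values, not the real V8 constants):

#include <cassert>

// Assumed 64-bit layout; mirrors the deleted helper above.
static const int kPointerSize = 8;
static const int kFixedArrayHeaderSize = 16;  // hypothetical header size

int SizeFor(int length) { return kFixedArrayHeaderSize + length * kPointerSize; }
int LenFromSize(int size) { return (size - kFixedArrayHeaderSize) / kPointerSize; }

int main() {
  // Round-trips exactly when the size is a header plus whole slots...
  assert(LenFromSize(SizeFor(1000)) == 1000);
  // ...and rounds down otherwise, which is why the deleted CreatePadding
  // checked for length <= 0 and fell back to a filler object.
  assert(LenFromSize(kFixedArrayHeaderSize + 7) == 0);
  return 0;
}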
@@ -598,7 +553,11 @@ static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
   CHECK(space_remaining >= extra_bytes);
   int new_linear_size = space_remaining - extra_bytes;
   if (new_linear_size == 0) return;
-  CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
+  v8::internal::AllocationResult allocation =
+      space->AllocateRawUnaligned(new_linear_size);
+  v8::internal::HeapObject* free_space = NULL;
+  CHECK(allocation.To(&free_space));
+  space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
 }
 
 
@@ -6548,7 +6548,7 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
   }
   // We are relying on this creating a big flag array and reserving the space
   // up front.
-  v8::Handle<Value> big_array = CompileRun("new Array(5000)");
+  v8::Handle<Value> big_array = CompileRun("new Array(50000)");
   if (!v8::internal::FLAG_scavenge_reclaim_unmodified_objects)
     a->Set(v8_str("y"), big_array);
   big_heap_size = CcTest::heap()->SizeOfObjects();
@@ -6571,7 +6571,7 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
   }
   // A single GC should be enough to reclaim the memory, since we are using
   // phantom handles.
-  CHECK_LT(CcTest::heap()->SizeOfObjects(), big_heap_size - 20000);
+  CHECK_LT(CcTest::heap()->SizeOfObjects(), big_heap_size - 200000);
   CHECK(object_a.flag);
   CHECK(object_b.flag);
 }
@@ -5565,8 +5565,8 @@ TEST(ArrayShiftSweeping) {
   Heap* heap = isolate->heap();
 
   v8::Local<v8::Value> result = CompileRun(
-      "var array = new Array(400);"
-      "var tmp = new Array(1000);"
+      "var array = new Array(40000);"
+      "var tmp = new Array(100000);"
       "array[0] = 10;"
       "gc();"
       "gc();"
@@ -5638,8 +5638,16 @@ UNINITIALIZED_TEST(PromotionQueue) {
   heap->CollectGarbage(NEW_SPACE);
   CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
 
-  // Fill-up the first semi-space page.
-  FillUpOnePage(new_space);
+  // Create the first huge object which will exactly fit the first semi-space
+  // page.
+  DisableInlineAllocationSteps(new_space);
+  int new_linear_size =
+      static_cast<int>(*heap->new_space()->allocation_limit_address() -
+                       *heap->new_space()->allocation_top_address());
+  int length = (new_linear_size - FixedArray::kHeaderSize) / kPointerSize;
+  Handle<FixedArray> first =
+      i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
+  CHECK(heap->InNewSpace(*first));
 
   // Create a small object to initialize the bump pointer on the second
   // semi-space page.
@@ -5647,8 +5655,17 @@ UNINITIALIZED_TEST(PromotionQueue) {
       i_isolate->factory()->NewFixedArray(1, NOT_TENURED);
   CHECK(heap->InNewSpace(*small));
 
-  // Fill-up the second semi-space page.
-  FillUpOnePage(new_space);
+  // Create the second huge object of maximum allocatable second semi-space
+  // page size.
+  DisableInlineAllocationSteps(new_space);
+  new_linear_size =
+      static_cast<int>(*heap->new_space()->allocation_limit_address() -
+                       *heap->new_space()->allocation_top_address());
+  length = (new_linear_size - FixedArray::kHeaderSize) / kPointerSize;
+  Handle<FixedArray> second =
+      i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
+  CHECK(heap->InNewSpace(*second));
 
   // This scavenge will corrupt memory if the promotion queue is not
   // evacuated.
@@ -5674,11 +5691,19 @@ TEST(Regress388880) {
 
   int desired_offset = Page::kPageSize - map1->instance_size();
 
-  // Allocate padding objects in old pointer space so, that object allocated
+  // Allocate fixed array in old pointer space so, that object allocated
   // afterwards would end at the end of the page.
-  SimulateFullSpace(heap->old_space());
-  int padding_size = desired_offset - Page::kObjectStartOffset;
-  CreatePadding(heap, padding_size, TENURED);
+  {
+    SimulateFullSpace(heap->old_space());
+    int padding_size = desired_offset - Page::kObjectStartOffset;
+    int padding_array_length =
+        (padding_size - FixedArray::kHeaderSize) / kPointerSize;
+
+    Handle<FixedArray> temp2 =
+        factory->NewFixedArray(padding_array_length, TENURED);
+    Page* page = Page::FromAddress(temp2->address());
+    CHECK_EQ(Page::kObjectStartOffset, page->Offset(temp2->address()));
+  }
 
   Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
   o->set_properties(*factory->empty_fixed_array());
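The restored block sizes its padding FixedArray so that the next allocation lands flush against the page end: padding starts at kObjectStartOffset and must stop exactly instance_size bytes before kPageSize. A worked version of that arithmetic (all concrete sizes assumed for illustration):

#include <cstdio>

int main() {
  const int kPageSize = 1 << 20;        // assumed
  const int kObjectStartOffset = 112;   // hypothetical
  const int kPointerSize = 8;           // assumed 64-bit build
  const int kFixedArrayHeaderSize = 16; // hypothetical
  const int instance_size = 256;        // stand-in for map1->instance_size()

  int desired_offset = kPageSize - instance_size;
  int padding_size = desired_offset - kObjectStartOffset;
  int padding_array_length = (padding_size - kFixedArrayHeaderSize) / kPointerSize;
  // Padding array, then the object: the object's end coincides with the page end.
  std::printf("padding length %d, object ends at offset %d of %d\n",
              padding_array_length, desired_offset + instance_size, kPageSize);
  return 0;
}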
@@ -426,8 +426,7 @@ TEST(CompactionSpace) {
   // Cannot loop until "Available()" since we initially have 0 bytes available
   // and would thus neither grow, nor be able to allocate an object.
   const int kNumObjects = 100;
-  const int kExpectedPages = (kNumObjects / (compaction_space->AreaSize() /
-                                             Page::kMaxRegularHeapObjectSize));
+  const int kExpectedPages = kNumObjects;
   for (int i = 0; i < kNumObjects; i++) {
     compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
         .ToObjectChecked();
@@ -188,7 +188,8 @@ TEST(Regress2060a) {
 
   // Start second old-space page so that values land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  SimulateFullSpace(heap->old_space());
+  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
 
   // Fill up weak map with values on an evacuation candidate.
   {
@@ -227,7 +228,8 @@ TEST(Regress2060b) {
 
   // Start second old-space page so that keys land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  SimulateFullSpace(heap->old_space());
+  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
 
   // Fill up weak map with keys on an evacuation candidate.
   Handle<JSObject> keys[32];
@@ -187,7 +187,8 @@ TEST(WeakSet_Regress2060a) {
 
   // Start second old-space page so that values land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  SimulateFullSpace(heap->old_space());
+  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
 
   // Fill up weak set with values on an evacuation candidate.
   {
@@ -226,7 +227,8 @@ TEST(WeakSet_Regress2060b) {
 
   // Start second old-space page so that keys land on evacuation candidate.
   Page* first_page = heap->old_space()->anchor()->next_page();
-  SimulateFullSpace(heap->old_space());
+  int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+  factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
 
   // Fill up weak set with keys on an evacuation candidate.
   Handle<JSObject> keys[32];