Remove Heap::MaxRegularSpaceAllocationSize and use Page::MaxRegularHeapObjectSize instead.

BUG=
R=mstarzinger@chromium.org, mvstanton@chromium.org

Review URL: https://codereview.chromium.org/141653016

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18776 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: hpayer@chromium.org
Date: 2014-01-23 13:02:27 +00:00
Parent: a92e87e100
Commit: 83a1df2354
21 changed files with 41 additions and 41 deletions
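
For context, the two limits in play can be sketched side by side. This is a standalone C++ illustration, not V8 source: kPageSize matches V8's 1 MB pages of the time, but kObjectStartOffset and the semispace size are placeholder values; the 4/5 factor and the page arithmetic mirror the hunks below.

    #include <cstdio>

    // Assumed values: 1 MB pages match V8 at the time; kObjectStartOffset is
    // a placeholder for the real page header size.
    constexpr int kPageSize = 1 << 20;
    constexpr int kObjectStartOffset = 64;

    // New scheme: a compile-time constant derived purely from page layout
    // (mirrors Page::kMaxRegularHeapObjectSize in spaces.h below).
    constexpr int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

    // Old scheme: a runtime value tied to heap configuration (mirrors the
    // deleted Heap::MaxRegularSpaceAllocationSize in heap.h below).
    struct Heap {
      int initial_semispace_size;
      int MaxRegularSpaceAllocationSize() const {
        return initial_semispace_size * 4 / 5;
      }
    };

    int main() {
      Heap heap{512 * 1024};  // hypothetical semispace configuration
      std::printf("old, config-dependent limit: %d\n",
                  heap.MaxRegularSpaceAllocationSize());
      std::printf("new, page-derived limit:     %d\n",
                  kMaxRegularHeapObjectSize);
    }

After the change, every size check keys off the same page-derived constant, so the regular-object limit no longer varies with semispace configuration.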

View File

@@ -1639,7 +1639,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.

View File

@@ -2282,7 +2282,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object,
int object_size) {
- SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
@@ -2936,7 +2936,8 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
@@ -2952,7 +2953,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
@@ -2966,7 +2967,7 @@ MaybeObject* Heap::AllocateCell(Object* value) {
MaybeObject* Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
MaybeObject* maybe_result =
@@ -3724,7 +3725,7 @@ MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Foreign* result;
MaybeObject* maybe_result = Allocate(foreign_map(), space);
@@ -5348,7 +5349,7 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
MaybeObject* maybe =
@@ -6312,7 +6313,7 @@ bool Heap::ConfigureHeap(int max_semispace_size,
Page::kPageSize));
// We rely on being able to allocate new arrays in paged spaces.
- ASSERT(MaxRegularSpaceAllocationSize() >=
+ ASSERT(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +
FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
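
A back-of-envelope version of that assert, as a hedged sketch: the pointer size and 1 MB page are assumed, and the three header sizes are placeholders rather than V8's real layouts; only kInitialMaxFastElementArray (100000, see the objects.h hunk below) comes from the diff.

    constexpr int kPointerSize = 8;                            // assumed 64-bit
    constexpr int kMaxRegularHeapObjectSize = (1 << 20) - 64;  // assumed value
    constexpr int kJSArraySize = 32;                           // placeholder
    constexpr int kFixedArrayHeaderSize = 16;                  // placeholder
    constexpr int kAllocationMementoSize = 24;                 // placeholder
    constexpr int kInitialMaxFastElementArray = 100000;  // from objects.h below

    constexpr int FixedArraySizeFor(int length) {
      return kFixedArrayHeaderSize + length * kPointerSize;
    }

    // 32 + (16 + 100000 * 8) + 24 = 800072 bytes, under 1048512: it holds.
    static_assert(kJSArraySize + FixedArraySizeFor(kInitialMaxFastElementArray) +
                      kAllocationMementoSize <= kMaxRegularHeapObjectSize,
                  "initial fast-element array must fit in a regular heap object");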

View File

@@ -531,7 +531,6 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -2103,7 +2102,7 @@ class Heap {
PretenureFlag pretenure) {
ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
preferred_old_space == OLD_DATA_SPACE);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
}
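
Stripped of the pointer/data-space distinction, the rule this hunk touches reads roughly as below; the enum and the limit are simplified placeholders for V8's definitions, not the real ones.

    enum AllocationSpace { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };  // abridged

    constexpr int kMaxRegularHeapObjectSize = (1 << 20) - 64;  // assumed value

    AllocationSpace SelectSpace(int object_size, bool tenured) {
      // Objects too large for a regular page go to large object space,
      // regardless of pretenuring.
      if (object_size > kMaxRegularHeapObjectSize) return LO_SPACE;
      return tenured ? OLD_DATA_SPACE : NEW_SPACE;
    }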

View File

@@ -3453,7 +3453,9 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- if (new_dominator_size > isolate()->heap()->MaxRegularSpaceAllocationSize()) {
+ // Since we clear the first word after folded memory, we cannot use the
+ // whole Page::kMaxRegularHeapObjectSize memory.
+ if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),

View File

@@ -2369,10 +2369,9 @@ HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
- Heap* heap = isolate()->heap();
int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
: kPointerSize;
- int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
+ int max_size = Page::kMaxRegularHeapObjectSize / element_size;
max_size -= JSArray::kSize / element_size;
HConstant* max_size_constant = Add<HConstant>(max_size);
Add<HBoundsCheck>(length, max_size_constant);
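
In isolation, the bound computed above is the largest element count whose backing store plus the JSArray header fits in one regular heap object. A hedged sketch; the element and header sizes are illustrative (a 32-bit pointer size is assumed so the two element sizes differ):

    constexpr int kMaxRegularHeapObjectSize = (1 << 20) - 64;  // assumed value
    constexpr int kDoubleSize = 8;
    constexpr int kPointerSize = 4;   // assumed 32-bit build
    constexpr int kJSArraySize = 32;  // placeholder header size

    int MaxNewSpaceArrayLength(bool fast_double_elements) {
      int element_size = fast_double_elements ? kDoubleSize : kPointerSize;
      // Same arithmetic as the hunk above: divide the limit into elements,
      // then reserve room for the JSArray header.
      int max_size = kMaxRegularHeapObjectSize / element_size;
      max_size -= kJSArraySize / element_size;
      return max_size;
    }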

View File

@@ -1591,7 +1591,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.

View File

@@ -2792,7 +2792,7 @@ void MarkCompactCollector::MigrateObject(Address dst,
heap_profiler->ObjectMoveEvent(src, dst, size);
}
ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
- ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
@@ -2966,7 +2966,7 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
// TODO(hpayer): Replace that check with an assert.
- CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ CHECK(object_size <= Page::kMaxRegularHeapObjectSize);
OldSpace* target_space = heap()->TargetSpace(object);

View File

@@ -2844,7 +2844,7 @@ void MacroAssembler::Allocate(int object_size,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.

View File

@@ -4702,7 +4702,7 @@ bool Map::CanHaveMoreTransitions() {
if (!HasTransitionArray()) return true;
return FixedArray::SizeFor(transitions()->length() +
TransitionArray::kTransitionSize)
- <= Page::kMaxNonCodeHeapObjectSize;
+ <= Page::kMaxRegularHeapObjectSize;
}
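
The same limit also caps transition arrays: a map may grow its transitions only while the enlarged array would still be a regular (non-large) object. A sketch under assumed sizes; kTransitionSize here stands for the slots-per-transition width, and every constant is a placeholder:

    constexpr int kPointerSize = 8;                            // assumed 64-bit
    constexpr int kMaxRegularHeapObjectSize = (1 << 20) - 64;  // assumed value
    constexpr int kFixedArrayHeaderSize = 16;                  // placeholder
    constexpr int kTransitionSize = 3;                         // placeholder width

    constexpr int FixedArraySizeFor(int length) {
      return kFixedArrayHeaderSize + length * kPointerSize;
    }

    // Mirrors Map::CanHaveMoreTransitions above for a map whose transition
    // array currently has `length` slots.
    bool CanHaveMoreTransitions(int length) {
      return FixedArraySizeFor(length + kTransitionSize) <=
             kMaxRegularHeapObjectSize;
    }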

View File

@@ -144,7 +144,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);

View File

@@ -2663,7 +2663,7 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // Note that Page::kMaxRegularHeapObjectSize puts a limit on
// permissible values (see the ASSERT in heap.cc).
static const int kInitialMaxFastElementArray = 100000;

View File

@@ -9757,7 +9757,7 @@ static MaybeObject* Allocate(Isolate* isolate,
Heap* heap = isolate->heap();
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
- RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
+ RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
HeapObject* allocation;
{ MaybeObject* maybe_allocation = heap->AllocateRaw(size, space, space);
if (!maybe_allocation->To(&allocation)) return maybe_allocation;

View File

@@ -165,7 +165,7 @@ Page* Page::Initialize(Heap* heap,
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(page->area_size() <= kMaxNonCodeHeapObjectSize);
+ ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());

View File

@@ -103,7 +103,7 @@ class Isolate;
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
+ ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
@@ -783,7 +783,7 @@ class Page : public MemoryChunk {
// are allocated in large object space and are never moved in memory. This
// also applies to new space allocation, since objects are never migrated
// from new space to large object space. Takes double alignment into account.
- static const int kMaxNonCodeHeapObjectSize = kPageSize - kObjectStartOffset;
+ static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -1076,7 +1076,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
}
// Returns an indication of whether a pointer is in a space that has
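
MaxAvailable() is straight per-page arithmetic: whole pages times the usable area of each. A minimal sketch with assumed sizes:

    #include <cstdint>

    constexpr std::intptr_t kPageSize = 1 << 20;                         // assumed
    constexpr std::intptr_t kMaxRegularHeapObjectSize = kPageSize - 64;  // assumed

    // Each whole available page contributes its regular-object area (the page
    // minus its bookkeeping header), not the full kPageSize.
    std::intptr_t MaxAvailable(std::intptr_t available_bytes) {
      return (available_bytes / kPageSize) * kMaxRegularHeapObjectSize;
    }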
@@ -1631,7 +1631,7 @@ class FreeList {
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
+ static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
@@ -2009,7 +2009,7 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kMaxNonCodeHeapObjectSize;
+ static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
inline NewSpacePage* next_page() const {
return static_cast<NewSpacePage*>(next_chunk());
@@ -2669,7 +2669,7 @@ class MapSpace : public PagedSpace {
virtual void VerifyObject(HeapObject* obj);
private:
- static const int kMapsPerPage = Page::kMaxNonCodeHeapObjectSize / Map::kSize;
+ static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {

View File

@@ -4048,7 +4048,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.

View File

@@ -198,11 +198,11 @@ TEST(CodeRange) {
if (current_allocated < code_range_size / 10) {
// Allocate a block.
// Geometrically distributed sizes, greater than
- // Page::kMaxNonCodeHeapObjectSize (which is greater than code page area).
+ // Page::kMaxRegularHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3 use some constant based on code_range_size
// kMaxHeapObjectSize.
size_t requested =
- (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
+ (Page::kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
Address base = code_range.AllocateRawMemory(requested,

View File

@@ -918,7 +918,7 @@ TEST(Iteration) {
factory->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
+ int large_size = Page::kMaxRegularHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
@@ -987,7 +987,7 @@ TEST(Regression39128) {
// just enough room to allocate JSObject and thus fill the newspace.
int allocation_amount = Min(FixedArray::kMaxSize,
- Page::kMaxNonCodeHeapObjectSize + kPointerSize);
+ Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
Address* top_addr = new_space->allocation_top_address();

View File

@@ -82,7 +82,7 @@ TEST(Promotion) {
// Allocate a fixed array in the new space.
int array_length =
- (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
(4 * kPointerSize);
Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
@@ -107,7 +107,7 @@ TEST(NoPromotion) {
// Allocate a big fixed array in the new space.
int array_length =
- (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
(2 * kPointerSize);
Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));

View File

@@ -327,9 +327,9 @@ TEST(NewSpace) {
CcTest::heap()->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
- while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
+ while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
Object* obj =
- new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+ new_space.AllocateRaw(Page::kMaxRegularHeapObjectSize)->
ToObjectUnchecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
@@ -359,7 +359,7 @@ TEST(OldSpace) {
CHECK(s->SetUp());
while (s->Available() > 0) {
- s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
+ s->AllocateRaw(Page::kMaxRegularHeapObjectSize)->ToObjectUnchecked();
}
s->TearDown();

View File

@@ -101,8 +101,7 @@ gc();
assertEquals(result[1], 4);
assertEquals(result2[1], 6);
- // Test to exceed the Heap::MaxRegularSpaceAllocationSize limit but not
- // the Page::kMaxNonCodeHeapObjectSize limit with allocation folding.
+ // Test to almost exceed the Page::MaxRegularHeapObjectSize limit.
function boom() {
var a1 = new Array(84632);

View File

@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- // Test to exceed the Heap::MaxRegularSpaceAllocationSize with an array
+ // Test to exceed the Page::MaxRegularHeapObjectSize with an array
// constructor call taking many arguments.
function boom() {