Extend the maximum size of the map space

On 32-bit, maps are now aligned on a 32-byte boundary in order to encode more maps during compacting GC. The actual size of a map on 32-bit is 28 bytes, so this change wastes 4 bytes per map.

On 64-bit, the encoding for compacting GC now uses more than 32 bits, and maps remain pointer-size aligned. The actual size of a map on 64-bit is 48 bytes, so this change does not introduce any waste.
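
As a rough illustration of the alignment arithmetic above (a minimal sketch using the 28- and 48-byte sizes quoted in this message; AlignUp is only a stand-in for the MAP_SIZE_ALIGN macro added to globals.h, not code from this change):

#include <cassert>

// Round value up to the next multiple of a power-of-two alignment, the same
// way MAP_SIZE_ALIGN rounds using kMapAlignmentMask.
constexpr int AlignUp(int value, int alignment) {
  return (value + (alignment - 1)) & ~(alignment - 1);
}

int main() {
  // 32-bit: a 28-byte map rounded up to the new 32-byte map alignment
  // occupies 32 bytes, i.e. 4 bytes of padding per map.
  assert(AlignUp(28, 32) == 32);
  // 64-bit: a 48-byte map is already pointer-size (8-byte) aligned, so the
  // rounded size is still 48 bytes and nothing is wasted.
  assert(AlignUp(48, 8) == 48);
  return 0;
}

This is also why the AllocateMap change below only fills the pad area when Map::kPadStart < Map::kSize: on 64-bit the two are equal and no padding is written.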

My choice of 16 bits for kMapPageIndexBits on 64-bit should give the same maximum number of pages (8K) for the map space. As maps on 64-bit are larger than on 32-bit, the total number of maps on 64-bit will be smaller than on 32-bit. We could consider raising this to 17 or 18.
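
To spell out the bit budget (a sketch only; the 32-bit values follow from the constants in this change, and kObjectAlignmentBits == 2, i.e. 4-byte object alignment, is assumed for the 32-bit case):

#include <cstdio>

// 32-bit configuration, constants as introduced by this change.
const int kPageSizeBits         = 13;                        // 8K-byte pages
const int kObjectAlignmentBits  = 2;                         // 4-byte objects
const int kMapAlignmentBits     = kObjectAlignmentBits + 3;  // 32-byte maps

const int kMapPageOffsetBits    = kPageSizeBits - kMapAlignmentBits;     // 8
const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;  // 11
const int kMapPageIndexBits     =
    32 - (kMapPageOffsetBits + kForwardingOffsetBits);                   // 13

int main() {
  // 13 bits of page index give 8192 map-space pages on 32-bit.
  printf("32-bit map pages: %d\n", 1 << kMapPageIndexBits);
  // On 64-bit, kMapPageIndexBits is simply fixed at 16, which can encode
  // 65536 page indices -- comfortably above the 8K pages mentioned above.
  printf("64-bit encodable map pages: %d\n", 1 << 16);
  return 0;
}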

I moved kPageSizeBits to globals.h, as the calculation of the encoding depends on it.

There is still an #ifdef/#endif in objects.h, and this constant could be moved to globals.h as well, but I kept it together with the related constants.

All the tests pass in debug mode with the additional options --gc-global --always-compact as well (except for a few tests which also fail before this change when run with --gc-global --always-compact).

BUG=http://code.google.com/p/v8/issues/detail?id=524
BUG=http://crbug.com/29428
TEST=test/mjsunit/regress/regress-524.js

Review URL: http://codereview.chromium.org/504026

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3481 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
sgjesse@chromium.org 2009-12-17 08:53:18 +00:00
parent 51aa605e62
commit 44b7c59eb5
12 changed files with 111 additions and 47 deletions

View File

@@ -145,6 +145,14 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Desired alignment for maps.
#if V8_HOST_ARCH_64_BIT
const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
#else
const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
#endif
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;
// Tag information for Failure.
const int kFailureTag = 3;
@@ -174,6 +182,11 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
#endif
// Number of bits to represent the page size for paged spaces. The value of 13
// gives 8K bytes per page.
const int kPageSizeBits = 13;
// Constants relevant to double precision floating point numbers.
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
@@ -450,6 +463,10 @@ enum StateTag {
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't

View File

@@ -129,6 +129,12 @@ Object* Heap::AllocateRawMap() {
#endif
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
#ifdef DEBUG
if (!result->IsFailure()) {
// Maps have their own alignment.
CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
}
#endif
return result;
}

View File

@@ -1142,6 +1142,13 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
// If the map object is aligned, fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
0,
Map::kSize - Map::kPadStart);
}
return map;
}

View File

@@ -891,7 +891,7 @@ class Heap : public AllStatic {
// The number of MapSpace pages is limited by the way we pack
// Map pointers during GC.
static const int kMaxMapSpaceSize =
(1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
(1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
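
As a rough sketch (not part of the diff) of what kMaxMapSpaceSize works out to with the page-index widths discussed in the commit message and 8K-byte pages:

#include <cstdint>

// 32-bit: 2^13 map pages * 8K bytes per page = 64 MB of map space.
constexpr int64_t kMaxMapSpaceSize32 = int64_t(1 << 13) * (1 << 13);
// 64-bit: a 16-bit page index could encode 2^16 * 8K bytes = 512 MB.
constexpr int64_t kMaxMapSpaceSize64 = int64_t(1 << 16) * (1 << 13);

static_assert(kMaxMapSpaceSize32 == 64 * 1024 * 1024, "64 MB");
static_assert(kMaxMapSpaceSize64 == 512 * 1024 * 1024, "512 MB");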

View File

@@ -836,11 +836,8 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
//  31             21 20             10 9               0
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
//       11 bits            11 bits           10 bits
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent

View File

@@ -952,14 +952,14 @@ MapWord MapWord::EncodeAddress(Address map_address, int offset) {
// exceed the object area size of a page.
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
int compact_offset = offset >> kObjectAlignmentBits;
uintptr_t compact_offset = offset >> kObjectAlignmentBits;
ASSERT(compact_offset < (1 << kForwardingOffsetBits));
Page* map_page = Page::FromAddress(map_address);
ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
int map_page_offset =
map_page->Offset(map_address) >> kObjectAlignmentBits;
uintptr_t map_page_offset =
map_page->Offset(map_address) >> kMapAlignmentBits;
uintptr_t encoding =
(compact_offset << kForwardingOffsetShift) |
@@ -975,8 +975,8 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) {
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset = static_cast<int>(
((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
<< kObjectAlignmentBits);
((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
kMapAlignmentBits);
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
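
To make the packed word concrete, here is a small stand-alone round-trip of the same pack/unpack scheme using the 32-bit widths (a sketch only; the Encode helper and the example numbers are illustrative, not code from this change):

#include <cassert>
#include <cstdint>

// 32-bit widths: 13-bit page index, 8-bit map page offset (32-byte
// granularity), 11-bit forwarding offset (4-byte granularity).
const int kMapAlignmentBits      = 5;
const int kObjectAlignmentBits   = 2;
const int kMapPageIndexBits      = 13;
const int kMapPageOffsetBits     = 8;
const int kMapPageOffsetShift    = kMapPageIndexBits;                         // 13
const int kForwardingOffsetShift = kMapPageOffsetShift + kMapPageOffsetBits;  // 21

const uint32_t kMapPageIndexMask  = (1u << kMapPageOffsetShift) - 1;
const uint32_t kMapPageOffsetMask =
    ((1u << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
const uint32_t kForwardingOffsetMask = ~(kMapPageIndexMask | kMapPageOffsetMask);

// Pack a map page index, the map's byte offset within its page (32-byte
// aligned) and the object's forwarding byte offset (4-byte aligned).
uint32_t Encode(uint32_t page_index, uint32_t map_offset, uint32_t fwd_offset) {
  return ((fwd_offset >> kObjectAlignmentBits) << kForwardingOffsetShift) |
         ((map_offset >> kMapAlignmentBits) << kMapPageOffsetShift) |
         page_index;
}

int main() {
  // A map on page 5000 (only representable with the new 13-bit index),
  // 96 bytes into its page; the dead object's forwarding offset is 1024.
  uint32_t w = Encode(5000, 96, 1024);
  assert((w & kMapPageIndexMask) == 5000);
  assert((((w & kMapPageOffsetMask) >> kMapPageOffsetShift) << kMapAlignmentBits) == 96);
  assert((((w & kForwardingOffsetMask) >> kForwardingOffsetShift) << kObjectAlignmentBits) == 1024);
  return 0;
}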

View File

@@ -892,15 +892,25 @@ class MapWord BASE_EMBEDDED {
static const int kOverflowBit = 1; // overflow bit
static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
// Forwarding pointers and map pointer encoding
//  31             21 20             10 9               0
// Forwarding pointers and map pointer encoding. On 32 bit all the bits are
// used.
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
//       11 bits            11 bits           10 bits
static const int kMapPageIndexBits = 10;
static const int kMapPageOffsetBits = 11;
static const int kForwardingOffsetBits = 11;
//          ^                 ^                  ^
//          |                 |                  |
//          |                 |          kMapPageIndexBits
//          |       kMapPageOffsetBits
//  kForwardingOffsetBits
static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
#ifdef V8_HOST_ARCH_64_BIT
static const int kMapPageIndexBits = 16;
#else
// Use all 32 bits to encode on a 32-bit platform.
static const int kMapPageIndexBits =
32 - (kMapPageOffsetBits + kForwardingOffsetBits);
#endif
static const int kMapPageIndexShift = 0;
static const int kMapPageOffsetShift =
@@ -908,16 +918,12 @@
static const int kForwardingOffsetShift =
kMapPageOffsetShift + kMapPageOffsetBits;
// 0x000003FF
static const uint32_t kMapPageIndexMask =
// Bit masks covering the different parts of the encoding.
static const uintptr_t kMapPageIndexMask =
(1 << kMapPageOffsetShift) - 1;
// 0x001FFC00
static const uint32_t kMapPageOffsetMask =
static const uintptr_t kMapPageOffsetMask =
((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
// 0xFFE00000
static const uint32_t kForwardingOffsetMask =
static const uintptr_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
private:
@@ -2841,7 +2847,6 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, FixedArray)
// Returns a copy of the map.
Object* CopyDropDescriptors();
// Returns a copy of the map, with all transitions dropped from the
@@ -2909,7 +2914,8 @@
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kSize = kCodeCacheOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_SIZE_ALIGN(kPadStart);
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;

View File

@@ -632,7 +632,7 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
return HeapObject::FromAddress(pages_[space][0] + offset);
}
ASSERT(SpaceIsPaged(space));
int page_of_pointee = offset >> Page::kPageSizeBits;
int page_of_pointee = offset >> kPageSizeBits;
Address object_address = pages_[space][page_of_pointee] +
(offset & Page::kPageAlignmentMask);
return HeapObject::FromAddress(object_address);
@@ -972,8 +972,8 @@ void Serializer::SerializeObject(
int offset = CurrentAllocationAddress(space) - address;
bool from_start = true;
if (SpaceIsPaged(space)) {
if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
(address >> Page::kPageSizeBits)) {
if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
(address >> kPageSizeBits)) {
from_start = false;
address = offset;
}

View File

@@ -398,7 +398,7 @@ static int PagesInChunk(Address start, size_t size) {
// start+size. Page::kPageSize is a power of two so we can divide by
// shifting.
return static_cast<int>((RoundDown(start + size, Page::kPageSize)
- RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
- RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}
@@ -412,7 +412,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
if (size_ + static_cast<int>(chunk_size) > capacity_) {
// Request as many pages as we can.
chunk_size = capacity_ - size_;
requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}

View File

@@ -65,20 +65,23 @@ namespace internal {
// Some assertion macros used in the debugging mode.
#define ASSERT_PAGE_ALIGNED(address) \
#define ASSERT_PAGE_ALIGNED(address) \
ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
#define ASSERT_OBJECT_ALIGNED(address) \
#define ASSERT_OBJECT_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
#define ASSERT_MAP_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
&& (offset <= Page::kPageSize))
#define ASSERT_MAP_PAGE_INDEX(index) \
#define ASSERT_MAP_PAGE_INDEX(index) \
ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
@@ -106,11 +109,8 @@ class AllocationInfo;
// For this reason we add an offset to get room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
// 8K) in total. Because a map pointer is aligned to the pointer size (4
// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
// page index + 11 for the offset in the page) are required to encode a map
// pointer.
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -212,9 +212,6 @@ class Page {
static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif
// 8K bytes per page.
static const int kPageSizeBits = 13;
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -514,7 +511,7 @@ class MemoryAllocator : public AllStatic {
#endif
// Due to encoding limitation, we can only have 8K chunks.
static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
static const int kMaxNofChunks = 1 << kPageSizeBits;
// If a chunk has at least 16 pages, the maximum heap size is about
// 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64

View File

@@ -45,6 +45,8 @@ array-constructor: PASS || TIMEOUT
# Very slow on ARM, contains no architecture dependent code.
unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
# Skip long running test in debug.
regress/regress-524: PASS, SKIP if $mode == debug
[ $arch == arm ]

View File

@@ -0,0 +1,32 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test allocation of a large number of maps.
var i = 500000
var a = new Array(i)
for (var j = 0; j < i; j++) { var o = {}; o.x = 42; delete o.x; a[j] = o; }