Card-marking write barrier.

- The new card-marking write barrier handles large objects and normal objects in the same way: no additional space for pointer tracking is required, and the write barrier code contains no conditional branches.
- Changes to enable iterating old spaces without decoding maps:
-- layout change for FixedArray: the length is stored as a smi (initial patch by
Kevin Millikin); a minimal sketch of the new layout follows this list.
-- layout change for SharedFunctionInfo: integer fields are stored as smis on
arm and ia32, and are rearranged on x64.
-- layout change for String: the meaning of the LSB is flipped (1 now means the
hash is not computed); on x64, padding is added.
-- the layout of maps is _not_ changed. Map space is currently iterated in a
special way.
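
A minimal C++ sketch (not V8 source; kSmiTagSize and the struct layout below are
assumptions used only for illustration) of what "length is stored as a smi" means
for code that reads or writes the FixedArray length field:

#include <cstdint>

static const int kSmiTagSize = 1;  // assumed: one tag bit, smi tag value 0

// Mirrors Smi::FromInt and the ASR-by-kSmiTagSize untagging seen in the diff below.
inline intptr_t SmiFromInt(int value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}
inline int SmiToInt(intptr_t smi) {
  return static_cast<int>(smi >> kSmiTagSize);
}

struct FixedArrayHeader {    // hypothetical layout, for illustration only
  void* map;                 // kMapOffset == 0
  intptr_t length;           // kLengthOffset == kPointerSize, now smi-tagged
};

// Before this change: header->length = n;              read: n = header->length;
// After this change:  header->length = SmiFromInt(n);  read: n = SmiToInt(header->length);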

Review URL: http://codereview.chromium.org/2144006

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4715 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
vegorov@chromium.org 2010-05-25 13:15:16 +00:00
parent 897f7dedc1
commit 675e711f1c
38 changed files with 1583 additions and 1615 deletions

View File

@ -138,7 +138,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)
@ -146,7 +146,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ mov(scratch3, Operand(initial_capacity));
__ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
@ -243,23 +243,23 @@ static void AllocateJSArray(MacroAssembler* masm,
__ and_(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
// Initialize the fixed array and fill it with holes. FixedArray length is not
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
// Convert array_size from smi to value.
__ mov(array_size,
Operand(array_size, ASR, kSmiTagSize));
ASSERT(kSmiTag == 0);
__ tst(array_size, array_size);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
// JSArrays. The length of a FixedArray is not stored as a smi.
__ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
// JSArrays. The length of a FixedArray is stored as a smi.
__ mov(array_size,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
LeaveCC,
eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@ -267,10 +267,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
// array_size: size of elements array
// array_size: smi-tagged size of elements array
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(elements_array_end,
elements_array_storage,
Operand(array_size, LSL, kPointerSizeLog2));
Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
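
The scaled index above relies on the smi tag being zero: a smi-tagged index is just
the untagged index shifted left by kSmiTagSize, so scaling it to a byte offset only
needs the remaining kPointerSizeLog2 - kSmiTagSize shift (the same idiom appears in
the keyed-load changes later in this diff). A small self-contained check of that
arithmetic; the constant values are assumed 32-bit settings, not taken from this diff:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;       // assumed: one tag bit, tag value 0
  const int kPointerSizeLog2 = 2;  // assumed: 32-bit pointers
  for (int i = 0; i < 1000; ++i) {
    intptr_t smi_index = static_cast<intptr_t>(i) << kSmiTagSize;
    // (i << kSmiTagSize) << (kPointerSizeLog2 - kSmiTagSize) == i << kPointerSizeLog2
    assert((smi_index << (kPointerSizeLog2 - kSmiTagSize)) ==
           (static_cast<intptr_t>(i) << kPointerSizeLog2));
  }
  return 0;
}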
@ -543,7 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
// r7: undefined
// r7: undefined value
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
@ -555,14 +556,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
// r7: undefined
// r7: undefined value
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
// r7: undefined
// r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
@ -572,7 +573,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r3: object size
// r4: JSObject (not tagged)
// r7: undefined
// r7: undefined value
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@ -588,7 +589,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
// r7: undefined
// r7: undefined value
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry;
@ -611,7 +612,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
// r7: undefined
// r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
@ -633,7 +634,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
// r7: undefined
// r7: undefined value
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(
r0,
@ -648,13 +649,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
// r7: undefined
// r7: undefined value
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
__ str(r3, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
// r1: constructor function
@ -1047,6 +1049,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.

View File

@ -2276,7 +2276,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(r0); // map
frame_->EmitPush(r2); // enum cache bridge cache
__ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
@ -2289,7 +2288,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0))); // init index
frame_->EmitPush(r0);
@ -4490,7 +4488,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(r2, Operand(Factory::fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
__ mov(r2, Operand(Factory::the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -5697,7 +5696,7 @@ void CodeGenerator::EmitKeyedLoad() {
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
__ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
__ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
__ cmp(scratch2, key);
deferred->Branch(ls); // Unsigned less equal.
// Load and check that the result is not the hole (key is a smi).
@ -5998,8 +5997,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(length));
__ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
@ -6630,8 +6629,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
// Divide length by two (length is not a smi).
__ mov(mask, Operand(mask, ASR, 1));
// Divide length by two (length is a smi).
__ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
__ sub(mask, mask, Operand(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
@ -8522,9 +8521,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ cmp(r1, Operand(0));
__ b(eq, &done);
// Get the parameters pointer from the stack and untag the length.
// Get the parameters pointer from the stack.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@ -8533,6 +8531,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
// Copy the fixed array slots.
Label loop;
@ -8683,7 +8682,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
__ cmp(r2, r0);
__ cmp(r2, Operand(r0, ASR, kSmiTagSize));
__ b(gt, &runtime);
// subject: Subject string

View File

@ -163,11 +163,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// t1 - used to hold the capacity mask of the dictionary
//
@ -235,7 +235,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ ldr(t0, FieldMemOperand(t2, kValueOffset));
__ ldr(key, FieldMemOperand(t2, kValueOffset));
}
@ -743,9 +743,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Untag key into r2..
__ mov(r2, Operand(key, ASR, kSmiTagSize));
// Get the elements array of the object.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
@ -754,12 +751,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
__ cmp(r2, r3);
__ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ cmp(key, Operand(r3));
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
// The key is a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
@ -770,7 +769,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ bind(&check_pixel_array);
@ -778,6 +776,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
__ mov(r2, Operand(key, ASR, kSmiTagSize));
__ cmp(r2, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
@ -788,14 +787,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
__ mov(r0, r2);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@ -1283,11 +1281,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r4, Operand(key, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ cmp(key, Operand(ip));
__ b(lo, &fast);
// Slow case, handle jump to runtime.
@ -1333,9 +1329,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Condition code from comparing key and array length is still available.
__ b(ne, &slow); // Only support writing to array[array.length].
// Check for room in the elements backing store.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
// Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
// Calculate key + 1 as smi.
ASSERT_EQ(0, kSmiTag);

View File

@ -252,63 +252,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
const int kRSetWordShift = 3;
mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
Label fast;
// Calculate region number.
add(offset, object, Operand(offset)); // Add offset into the object.
and_(offset, offset, Operand(ip)); // Offset into page of the object.
mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
// Compute the bit offset in the remembered set.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
and_(scratch, object, Operand(ip)); // offset into page of the object
add(offset, scratch, Operand(offset)); // add offset into the object
mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
// Compute the page address from the heap object pointer.
// object: heap object pointer (with tag)
// offset: bit offset of store position in the remembered set
// Calculate page address.
bic(object, object, Operand(ip));
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
// object: page start
// offset: bit offset of store position in the remembered set
cmp(offset, Operand(Page::kPageSize / kPointerSize));
b(lt, &fast);
// Adjust the bit offset to be relative to the start of the extra
// remembered set and the start address to be the address of the extra
// remembered set.
sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
// Load the array length into 'scratch' and multiply by four to get the
// size in bytes of the elements.
ldr(scratch, MemOperand(object, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
// Add the page header (including remembered set), array header, and array
// body size to the page address.
add(object, object, Operand(Page::kObjectStartOffset
+ FixedArray::kHeaderSize));
add(object, object, Operand(scratch));
bind(&fast);
// Get address of the rset word.
// object: start of the remembered set (page start for the fast case)
// offset: bit offset of store position in the remembered set
bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
add(object, object, Operand(scratch, LSR, kRSetWordShift));
// Get bit offset in the rset word.
// object: address of remembered set word
// offset: bit offset of store position
and_(offset, offset, Operand(kBitsPerInt - 1));
ldr(scratch, MemOperand(object));
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
str(scratch, MemOperand(object));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
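
A rough C++ sketch of the region marking performed by the rewritten RecordWriteHelper
above. This is not V8 source: the page size, region size, and kDirtyFlagOffset below
are assumed values chosen only to make the arithmetic concrete (an 8K page split into
256-byte regions gives 32 regions, i.e. one 32-bit dirty-marks word per page):

#include <cstdint>

static const uintptr_t kPageSize = 1 << 13;                // assumed 8K pages
static const uintptr_t kPageAlignmentMask = kPageSize - 1;
static const int kRegionSizeLog2 = 8;                      // assumed 256-byte regions
static const uintptr_t kDirtyFlagOffset = 0;               // hypothetical offset of the marks word

// Mark the region covering [object + offset] dirty on the object's page.
void MarkRegionDirty(uintptr_t object, uintptr_t offset) {
  // Region number of the store location within the page (0..31 with the values above).
  uint32_t region =
      static_cast<uint32_t>(((object + offset) & kPageAlignmentMask) >> kRegionSizeLog2);
  // Page start is derived from the object address, as in the ARM code above.
  uintptr_t page = object & ~kPageAlignmentMask;
  // Set the region's bit in the page's single word of dirty marks.
  uint32_t* marks = reinterpret_cast<uint32_t*>(page + kDirtyFlagOffset);
  *marks |= (1u << region);
}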
@ -336,7 +294,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label done;
// First, test that the object is not in the new space. We cannot set
// remembered set bits in the new space.
// region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
// Record the actual write.
@ -619,6 +577,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

View File

@ -114,16 +114,14 @@ class MacroAssembler: public Assembler {
Label* branch);
// Set the remebered set bit for an offset into an
// object. RecordWriteHelper only works if the object is not in new
// space.
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object, Register offset, Register scracth);
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
// implementation and all 3 registers are clobbered by the operation, as
// well as the ip register.
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
// The 'scratch' register is used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);
// Push two registers. Pushes leftmost register first (to highest address).

View File

@ -305,7 +305,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// remembered sets.
// region dirty marks.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
@ -322,7 +322,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
ASSERT_EQ(elms->address() + to_trim * kPointerSize,
(elms + to_trim * kPointerSize)->address());
@ -500,7 +500,7 @@ BUILTIN(ArrayShift) {
if (Heap::new_space()->Contains(elms)) {
// As elms still in the same space they used to be (new space),
// there is no need to update remembered set.
// there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.

View File

@ -337,7 +337,6 @@ DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
DEFINE_bool(print_rset, false, "print remembered sets before GC")
// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")

View File

@ -303,7 +303,6 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class Array;
class JSArray;
class JSFunction;
class JSObject;
@ -544,16 +543,16 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
#define MAP_POINTER_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
// The expression OFFSET_OF(type, field) computes the byte-offset

View File

@ -184,7 +184,7 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::SetRSet(address, offset);
Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
@ -195,7 +195,7 @@ void Heap::RecordWrites(Address address, int start, int len) {
offset < start + len * kPointerSize;
offset += kPointerSize) {
SLOW_ASSERT(Contains(address + offset));
Page::SetRSet(address, offset);
Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
}
@ -234,13 +234,40 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(dst, src, byte_size / kPointerSize);
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
}
void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
Page* page = Page::FromAddress(dst);
uint32_t marks = page->GetRegionMarks();
for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
if (Heap::InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
dst += kPointerSize;
src += kPointerSize;
}
page->SetRegionMarks(marks);
}
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
int size_in_words = byte_size / kPointerSize;
@ -250,10 +277,12 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
((OffsetFrom(reinterpret_cast<Address>(src)) -
OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
Object** end = src + size_in_words;
Object** src_slot = reinterpret_cast<Object**>(src);
Object** dst_slot = reinterpret_cast<Object**>(dst);
Object** end_slot = src_slot + size_in_words;
while (src != end) {
*dst++ = *src++;
while (src_slot != end_slot) {
*dst_slot++ = *src_slot++;
}
} else {
memmove(dst, src, byte_size);
@ -261,6 +290,17 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
}
void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
ASSERT((dst >= (src + byte_size)) ||
((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(InFromSpace(object));

View File

@ -326,13 +326,6 @@ void Heap::GarbageCollectionPrologue() {
}
if (FLAG_gc_verbose) Print();
if (FLAG_print_rset) {
// Not all spaces have remembered set bits that we care about.
old_pointer_space_->PrintRSet();
map_space_->PrintRSet();
lo_space_->PrintRSet();
}
#endif
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -519,9 +512,8 @@ void Heap::ReserveSpace(
Heap::CollectGarbage(cell_space_size, CELL_SPACE);
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for the remembered
// set and a series of large-object allocations that are only just larger
// than the page size.
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
@ -572,6 +564,25 @@ void Heap::ClearJSFunctionResultCaches() {
}
#ifdef DEBUG
enum PageWatermarkValidity {
ALL_VALID,
ALL_INVALID
};
static void VerifyPageWatermarkValidity(PagedSpace* space,
PageWatermarkValidity validity) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
bool expected_value = (validity == ALL_VALID);
while (it.has_next()) {
Page* page = it.next();
ASSERT(page->IsWatermarkValid() == expected_value);
}
}
#endif
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
@ -816,6 +827,20 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif
// We do not update an allocation watermark of the top page during linear
// allocation to avoid overhead. So to maintain the watermark invariant
// we have to manually cache the watermark and mark the top page as having an
// invalid watermark. This guarantees that dirty regions iteration will use a
// correct watermark even if a linear allocation happens.
old_pointer_space_->FlushTopPageWatermark();
map_space_->FlushTopPageWatermark();
// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));
@ -858,9 +883,17 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
IterateDirtyRegions(old_pointer_space_,
&IteratePointersInDirtyRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
IterateDirtyRegions(map_space_,
&IteratePointersInDirtyMapsRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
lo_space_->IterateDirtyRegions(&ScavengePointer);
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@ -963,9 +996,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Copy the from-space object to its new location (given by the
// forwarding address) and fix its map.
HeapObject* target = source->map_word().ToForwardingAddress();
CopyBlock(reinterpret_cast<Object**>(target->address()),
reinterpret_cast<Object**>(source->address()),
source->SizeFromMap(map));
int size = source->SizeFromMap(map);
CopyBlock(target->address(), source->address(), size);
target->set_map(map);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@ -973,8 +1005,10 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
target->Iterate(scavenge_visitor);
UpdateRSet(target);
ASSERT(!target->IsMap());
IterateAndMarkPointersToNewSpace(target->address(),
target->address() + size,
&ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
@ -985,117 +1019,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
void Heap::ClearRSetRange(Address start, int size_in_bytes) {
uint32_t start_bit;
Address start_word_address =
Page::ComputeRSetBitPosition(start, 0, &start_bit);
uint32_t end_bit;
Address end_word_address =
Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
0,
&end_bit);
// We want to clear the bits in the starting word starting with the
// first bit, and in the ending word up to and including the last
// bit. Build a pair of bitmasks to do that.
uint32_t start_bitmask = start_bit - 1;
uint32_t end_bitmask = ~((end_bit << 1) - 1);
// If the start address and end address are the same, we mask that
// word once, otherwise mask the starting and ending word
// separately and all the ones in between.
if (start_word_address == end_word_address) {
Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
} else {
Memory::uint32_at(start_word_address) &= start_bitmask;
Memory::uint32_at(end_word_address) &= end_bitmask;
start_word_address += kIntSize;
memset(start_word_address, 0, end_word_address - start_word_address);
}
}
class UpdateRSetVisitor: public ObjectVisitor {
public:
void VisitPointer(Object** p) {
UpdateRSet(p);
}
void VisitPointers(Object** start, Object** end) {
// Update a store into slots [start, end), used (a) to update remembered
// set when promoting a young object to old space or (b) to rebuild
// remembered sets after a mark-compact collection.
for (Object** p = start; p < end; p++) UpdateRSet(p);
}
private:
void UpdateRSet(Object** p) {
// The remembered set should not be set. It should be clear for objects
// newly copied to old space, and it is cleared before rebuilding in the
// mark-compact collector.
ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
if (Heap::InNewSpace(*p)) {
Page::SetRSet(reinterpret_cast<Address>(p), 0);
}
}
};
int Heap::UpdateRSet(HeapObject* obj) {
ASSERT(!InNewSpace(obj));
// Special handling of fixed arrays to iterate the body based on the start
// address and offset. Just iterating the pointers as in UpdateRSetVisitor
// will not work because Page::SetRSet needs to have the start of the
// object for large object pages.
if (obj->IsFixedArray()) {
FixedArray* array = FixedArray::cast(obj);
int length = array->length();
for (int i = 0; i < length; i++) {
int offset = FixedArray::kHeaderSize + i * kPointerSize;
ASSERT(!Page::IsRSetSet(obj->address(), offset));
if (Heap::InNewSpace(array->get(i))) {
Page::SetRSet(obj->address(), offset);
}
}
} else if (!obj->IsCode()) {
// Skip code object, we know it does not contain inter-generational
// pointers.
UpdateRSetVisitor v;
obj->Iterate(&v);
}
return obj->Size();
}
void Heap::RebuildRSets() {
// By definition, we do not care about remembered set bits in code,
// data, or cell spaces.
map_space_->ClearRSet();
RebuildRSets(map_space_);
old_pointer_space_->ClearRSet();
RebuildRSets(old_pointer_space_);
Heap::lo_space_->ClearRSet();
RebuildRSets(lo_space_);
}
void Heap::RebuildRSets(PagedSpace* space) {
HeapObjectIterator it(space);
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Heap::UpdateRSet(obj);
}
void Heap::RebuildRSets(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Heap::UpdateRSet(obj);
}
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
@ -1121,9 +1044,7 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
HeapObject* target,
int size) {
// Copy the content of source to target.
CopyBlock(reinterpret_cast<Object**>(target->address()),
reinterpret_cast<Object**>(source->address()),
size);
CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
@ -1178,21 +1099,30 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
if (object_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
HeapObject* target = HeapObject::cast(result);
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
// Give the space allocated for the result a proper map by
// treating it as a free list node (not linked into the free
// list).
FreeListNode* node = FreeListNode::FromAddress(target->address());
node->set_size(object_size);
if (object->IsFixedArray()) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
// Give the space allocated for the result a proper map by
// treating it as a free list node (not linked into the free
// list).
FreeListNode* node = FreeListNode::FromAddress(target->address());
node->set_size(object_size);
*p = target;
} else {
// In large object space only fixed arrays might possibly contain
// intergenerational references.
// All other objects can be copied immediately and not revisited.
*p = MigrateObject(object, target, object_size);
}
*p = target;
tracer()->increment_promoted_objects_size(object_size);
return;
}
@ -1682,7 +1612,7 @@ bool Heap::CreateInitialObjects() {
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
if (obj->IsFailure()) return false;
hidden_symbol_ = String::cast(obj);
@ -1918,6 +1848,9 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
share->set_num_literals(0);
share->set_end_position(0);
share->set_function_token_position(0);
return result;
}
@ -2179,8 +2112,8 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
: lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
reinterpret_cast<Array*>(result)->set_length(length);
reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@ -2195,8 +2128,8 @@ Object* Heap::AllocateByteArray(int length) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
reinterpret_cast<Array*>(result)->set_length(length);
reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@ -2312,9 +2245,7 @@ Object* Heap::CopyCode(Code* code) {
// Copy code object.
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
CopyBlock(new_addr, old_addr, obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@ -2460,8 +2391,8 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Copy the content. The arguments boilerplate doesn't have any
// fields that point to new space so it's safe to skip the write
// barrier here.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
CopyBlock(HeapObject::cast(result)->address(),
boilerplate->address(),
kArgumentsObjectSize);
// Set the two properties.
@ -2683,8 +2614,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
Address clone_address = HeapObject::cast(clone)->address();
CopyBlock(reinterpret_cast<Object**>(clone_address),
reinterpret_cast<Object**>(source->address()),
CopyBlock(clone_address,
source->address(),
object_size);
// Update write barrier for all fields that lie beyond the header.
RecordWrites(clone_address,
@ -2696,8 +2627,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
ASSERT(Heap::InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
reinterpret_cast<Object**>(source->address()),
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
}
@ -2968,8 +2899,8 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
reinterpret_cast<Array*>(result)->set_length(0);
reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@ -2994,9 +2925,7 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
if (obj->IsFailure()) return obj;
if (Heap::InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
CopyBlock(reinterpret_cast<Object**>(dst->address()),
reinterpret_cast<Object**>(src->address()),
FixedArray::SizeFor(len));
CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
return obj;
}
HeapObject::cast(obj)->set_map(src->map());
@ -3017,8 +2946,8 @@ Object* Heap::AllocateFixedArray(int length) {
Object* result = AllocateRawFixedArray(length);
if (!result->IsFailure()) {
// Initialize header.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
FixedArray* array = reinterpret_cast<FixedArray*>(result);
array->set_map(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(undefined_value()));
@ -3045,27 +2974,10 @@ Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
space = LO_SPACE;
}
// Specialize allocation for the space.
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
// We cannot use Heap::AllocateRaw() because it will not properly
// allocate extra remembered set bits if always_allocate() is true and
// new space allocation fails.
result = new_space_.AllocateRaw(size);
if (result->IsFailure() && always_allocate()) {
if (size <= MaxObjectSizeInPagedSpace()) {
result = old_pointer_space_->AllocateRaw(size);
} else {
result = lo_space_->AllocateRawFixedArray(size);
}
}
} else if (space == OLD_POINTER_SPACE) {
result = old_pointer_space_->AllocateRaw(size);
} else {
ASSERT(space == LO_SPACE);
result = lo_space_->AllocateRawFixedArray(size);
}
return result;
AllocationSpace retry_space =
(size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
return AllocateRaw(size, space, retry_space);
}
@ -3113,7 +3025,7 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@ -3365,6 +3277,49 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p) {
}
static void VerifyPointersUnderWatermark(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* page = it.next();
Address start = page->ObjectAreaStart();
Address end = page->AllocationWatermark();
Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
start,
end,
visit_dirty_region,
&DummyScavengePointer);
}
}
static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
if (object->IsFixedArray()) {
Address slot_address = object->address();
Address end = object->address() + object->Size();
while (slot_address < end) {
HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
// When we are not in GC the Heap::InNewSpace() predicate
// checks that pointers which satisfy predicate point into
// the active semispace.
Heap::InNewSpace(*slot);
slot_address += kPointerSize;
}
}
}
}
void Heap::Verify() {
ASSERT(HasBeenSetup());
@ -3373,14 +3328,23 @@ void Heap::Verify() {
new_space_.Verify();
VerifyPointersAndRSetVisitor rset_visitor;
old_pointer_space_->Verify(&rset_visitor);
map_space_->Verify(&rset_visitor);
VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
old_pointer_space_->Verify(&dirty_regions_visitor);
map_space_->Verify(&dirty_regions_visitor);
VerifyPointersVisitor no_rset_visitor;
old_data_space_->Verify(&no_rset_visitor);
code_space_->Verify(&no_rset_visitor);
cell_space_->Verify(&no_rset_visitor);
VerifyPointersUnderWatermark(old_pointer_space_,
&IteratePointersInDirtyRegion);
VerifyPointersUnderWatermark(map_space_,
&IteratePointersInDirtyMapsRegion);
VerifyPointersUnderWatermark(lo_space_);
VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
VerifyPointersVisitor no_dirty_regions_visitor;
old_data_space_->Verify(&no_dirty_regions_visitor);
code_space_->Verify(&no_dirty_regions_visitor);
cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
@ -3433,65 +3397,253 @@ void Heap::ZapFromSpace() {
#endif // DEBUG
int Heap::IterateRSetRange(Address object_start,
Address object_end,
Address rset_start,
ObjectSlotCallback copy_object_func) {
Address object_address = object_start;
Address rset_address = rset_start;
int set_bits_count = 0;
bool Heap::IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address slot_address = start;
bool pointers_to_new_space_found = false;
// Loop over all the pointers in [object_start, object_end).
while (object_address < object_end) {
uint32_t rset_word = Memory::uint32_at(rset_address);
if (rset_word != 0) {
uint32_t result_rset = rset_word;
for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
// Do not dereference pointers at or past object_end.
if ((rset_word & bitmask) != 0 && object_address < object_end) {
Object** object_p = reinterpret_cast<Object**>(object_address);
if (Heap::InNewSpace(*object_p)) {
copy_object_func(reinterpret_cast<HeapObject**>(object_p));
}
// If this pointer does not need to be remembered anymore, clear
// the remembered set bit.
if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
set_bits_count++;
}
object_address += kPointerSize;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
pointers_to_new_space_found = true;
}
// Update the remembered set if it has changed.
if (result_rset != rset_word) {
Memory::uint32_at(rset_address) = result_rset;
}
} else {
// No bits in the word were set. This is the common case.
object_address += kPointerSize * kBitsPerInt;
}
rset_address += kIntSize;
slot_address += kPointerSize;
}
return set_bits_count;
return pointers_to_new_space_found;
}
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
ASSERT(space == old_pointer_space_ || space == map_space_);
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
Address page = Page::FromAddress(addr)->ObjectAreaStart();
return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}
static void* paged_rset_histogram = StatsTable::CreateHistogram(
"V8.RSetPaged",
0,
Page::kObjectAreaSize / kPointerSize,
30);
// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
return page + ((addr - page) / Map::kSize * Map::kSize);
}
static bool IteratePointersInDirtyMaps(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
ASSERT(MapStartAlign(start) == start);
ASSERT(MapEndAlign(end) == end);
Address map_address = start;
bool pointers_to_new_space_found = false;
while (map_address < end) {
ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
ASSERT(Memory::Object_at(map_address)->IsMap());
Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)) {
pointers_to_new_space_found = true;
}
map_address += Map::kSize;
}
return pointers_to_new_space_found;
}
bool Heap::IteratePointersInDirtyMapsRegion(
Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address map_aligned_start = MapStartAlign(start);
Address map_aligned_end = MapEndAlign(end);
bool contains_pointers_to_new_space = false;
if (map_aligned_start != start) {
Address prev_map = map_aligned_start - Map::kSize;
ASSERT(Memory::Object_at(prev_map)->IsMap());
Address pointer_fields_start =
Max(start, prev_map + Map::kPointerFieldsBeginOffset);
Address pointer_fields_end =
Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
}
contains_pointers_to_new_space =
IteratePointersInDirtyMaps(map_aligned_start,
map_aligned_end,
copy_object_func)
|| contains_pointers_to_new_space;
if (map_aligned_end != end) {
ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
Address pointer_fields_end =
Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
}
return contains_pointers_to_new_space;
}
void Heap::IterateAndMarkPointersToNewSpace(Address start,
Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);
uint32_t marks = page->GetRegionMarks();
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
marks |= page->GetRegionMaskForAddress(slot_address);
}
}
slot_address += kPointerSize;
}
page->SetRegionMarks(marks);
}
uint32_t Heap::IterateDirtyRegions(
uint32_t marks,
Address area_start,
Address area_end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func) {
uint32_t newmarks = 0;
uint32_t mask = 1;
if (area_start >= area_end) {
return newmarks;
}
Address region_start = area_start;
// area_start does not necessarily coincide with start of the first region.
// Thus to calculate the beginning of the next region we have to align
// area_start by Page::kRegionSize.
Address second_region =
reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
~Page::kRegionAlignmentMask);
// Next region might be beyond area_end.
Address region_end = Min(second_region, area_end);
if (marks & mask) {
if (visit_dirty_region(region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
mask <<= 1;
// Iterate subsequent regions which fully lay inside [area_start, area_end[.
region_start = region_end;
region_end = region_start + Page::kRegionSize;
while (region_end <= area_end) {
if (marks & mask) {
if (visit_dirty_region(region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
region_start = region_end;
region_end = region_start + Page::kRegionSize;
mask <<= 1;
}
if (region_start != area_end) {
// A small piece of area left uniterated because area_end does not coincide
// with region end. Check whether region covering last part of area is
// dirty.
if (marks & mask) {
if (visit_dirty_region(region_start, area_end, copy_object_func)) {
newmarks |= mask;
}
}
}
return newmarks;
}
void Heap::IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func,
ExpectedPageWatermarkState expected_page_watermark_state) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* page = it.next();
int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
page->RSetStart(), copy_object_func);
if (paged_rset_histogram != NULL) {
StatsTable::AddHistogramSample(paged_rset_histogram, count);
uint32_t marks = page->GetRegionMarks();
if (marks != Page::kAllRegionsCleanMarks) {
Address start = page->ObjectAreaStart();
// Do not try to visit pointers beyond page allocation watermark.
// Page can contain garbage pointers there.
Address end;
if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
page->IsWatermarkValid()) {
end = page->AllocationWatermark();
} else {
end = page->CachedAllocationWatermark();
}
ASSERT(space == old_pointer_space_ ||
(space == map_space_ &&
((page->ObjectAreaStart() - end) % Map::kSize == 0)));
page->SetRegionMarks(IterateDirtyRegions(marks,
start,
end,
visit_dirty_region,
copy_object_func));
}
// Mark page watermark as invalid to maintain watermark validity invariant.
// See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
page->InvalidateWatermark(true);
}
}

View File

@ -206,6 +206,10 @@ class HeapStats;
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
typedef bool (*DirtyRegionCallback)(Address start,
Address end,
ObjectSlotCallback copy_object_func);
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@ -740,17 +744,54 @@ class Heap : public AllStatic {
// Iterates over all the other roots in the heap.
static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
WATERMARK_CAN_BE_INVALID
};
// For each dirty region on a page in use from an old space call
// visit_dirty_region callback.
// If either visit_dirty_region or callback can cause an allocation
// in old space and changes in allocation watermark then
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
static void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
ExpectedPageWatermarkState expected_page_watermark_state);
// Interpret marks as a bitvector of dirty marks for regions of size
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
static uint32_t IterateDirtyRegions(uint32_t marks,
Address start,
Address end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Update dirty marks for page containing start address.
static void IterateAndMarkPointersToNewSpace(Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space were found.
static bool IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space were found.
static bool IteratePointersInDirtyMapsRegion(Address start,
Address end,
ObjectSlotCallback callback);
// Iterates a range of remembered set addresses starting with rset_start
// corresponding to the range of allocated pointers
// [object_start, object_end).
// Returns the number of bits that were set.
static int IterateRSetRange(Address object_start,
Address object_end,
Address rset_start,
ObjectSlotCallback copy_object_func);
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
@ -852,17 +893,6 @@ class Heap : public AllStatic {
static void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
// Clear a range of remembered set addresses corresponding to the object
// area address 'start' with size 'size_in_bytes', eg, when adding blocks
// to the free list.
static void ClearRSetRange(Address start, int size_in_bytes);
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
// Update an old object's remembered set
static int UpdateRSet(HeapObject* obj);
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
@ -955,11 +985,19 @@ class Heap : public AllStatic {
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Object** dst, Object** src, int byte_size);
static inline void CopyBlock(Address dst, Address src, int byte_size);
static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Object** dst, Object** src, int byte_size);
static inline void MoveBlock(Address dst, Address src, int byte_size);
static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit.
static void CheckNewSpaceExpansionCriteria();
@ -1207,12 +1245,6 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif
// Rebuild remembered set in an old space.
static void RebuildRSets(PagedSpace* space);
// Rebuild remembered set in the large object space.
static void RebuildRSets(LargeObjectSpace* space);
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@ -1301,11 +1333,11 @@ class LinearAllocationScope {
#ifdef DEBUG
// Visitor class to verify interior pointers that do not have remembered set
// bits. All heap object pointers have to point into the heap to a location
// that has a map pointer at its first word. Caveat: Heap::Contains is an
// approximation because it can return true for objects in a heap space but
// above the allocation pointer.
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
@ -1320,10 +1352,11 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
// Visitor class to verify interior pointers that have remembered set bits.
// As VerifyPointersVisitor but also checks that remembered set bits are
// always set for pointers into new space.
class VerifyPointersAndRSetVisitor: public ObjectVisitor {
// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
@ -1332,7 +1365,9 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
if (Heap::InNewSpace(object)) {
ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
ASSERT(Heap::InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}


@ -226,8 +226,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: number of elements
// ecx: start of next object
__ mov(eax, Factory::fixed_array_map());
__ mov(Operand(edi, JSObject::kMapOffset), eax); // setup the map
__ mov(Operand(edi, Array::kLengthOffset), edx); // and length
__ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
__ SmiTag(edx);
__ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
// Initialize the fields to undefined.
// ebx: JSObject
@ -548,6 +549,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(ebx);
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
@ -752,15 +754,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ mov(FieldOperand(scratch1, JSObject::kMapOffset),
__ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
Factory::fixed_array_map());
__ mov(FieldOperand(scratch1, Array::kLengthOffset),
Immediate(initial_capacity));
__ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(initial_capacity)));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@ -847,23 +849,22 @@ static void AllocateJSArray(MacroAssembler* masm,
__ lea(elements_array, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
// Initialize the fixed array. FixedArray length is not stored as a smi.
// Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
__ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
__ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
Factory::fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
__ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
if (fill_with_hole) {
__ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(eax, Factory::the_hole_value());


@ -4198,7 +4198,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@ -4210,7 +4209,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@ -6600,9 +6598,9 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
// Set length.
__ SmiUntag(ecx);
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
__ SmiUntag(ecx);
__ mov(edx, Immediate(Factory::the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
@ -6706,7 +6704,6 @@ void DeferredSearchCache::Generate() {
// Check if we could add new entry to cache.
__ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ SmiTag(ebx);
__ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ j(greater, &add_new_entry);
@ -6904,12 +6901,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// (or them and test against Smi mask.)
__ mov(tmp2.reg(), tmp1.reg());
RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
__ CallStub(&recordWrite1);
RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
__ CallStub(&recordWrite2);
__ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
__ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
__ bind(&done);
deferred->BindExit();
@ -8608,13 +8601,10 @@ Result CodeGenerator::EmitKeyedLoad() {
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
result = allocator()->Allocate();
ASSERT(result.is_valid());
result = elements;
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(result.reg(),
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
key.reg());
@ -8646,20 +8636,17 @@ Result CodeGenerator::EmitKeyedLoad() {
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds. Use unsigned comparison to handle negative keys.
__ mov(result.reg(), key.reg());
__ SmiUntag(result.reg());
__ cmp(result.reg(),
// Check that the key is within bounds.
__ cmp(key.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// Load and check that the result is not the hole.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
__ mov(result.reg(), Operand(elements.reg(),
result.reg(),
times_4,
key.reg(),
times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@ -8744,7 +8731,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Check whether it is possible to omit the write barrier. If the elements
// array is in new space or the value written is a smi we can safely update
// the elements array without updating the remembered set.
// the elements array without write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@ -9014,7 +9001,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
@ -10977,9 +10965,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ test(ecx, Operand(ecx));
__ j(zero, &done);
// Get the parameters pointer from the stack and untag the length.
// Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ SmiUntag(ecx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@ -10988,6 +10975,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
__ SmiUntag(ecx);
// Copy the fixed array slots.
Label loop;
@ -11116,6 +11105,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
__ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
__ cmp(edx, Operand(eax));
__ j(greater, &runtime);
@ -11359,7 +11349,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, 1); // Divide length by two (length is not a smi).
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
__ sub(Operand(mask), Immediate(1)); // Make mask.
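Worked example of the new mask computation (kSmiTagSize is 1 on ia32): a number string cache of 128 elements stores its length as the smi 256; shifting right by kSmiTagSize + 1 = 2 gives 64 cache entries, since each entry spans two elements, and subtracting 1 yields the mask 63.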
// Calculate the entry in the number string cache. The hash value in the
@ -11450,12 +11440,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
void RecordWriteStub::Generate(MacroAssembler* masm) {
masm->RecordWriteHelper(object_, addr_, scratch_);
masm->ret(0);
}
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)


@ -1083,42 +1083,6 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register addr, Register scratch)
: object_(object), addr_(addr), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register addr_;
Register scratch_;
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
object_.code(), addr_.code(), scratch_.code());
}
#endif
// Minor key encoding in 12 bits. 4 bits for each of the three
// registers (object, address and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
AddressBits::encode(addr_.code()) |
ScratchBits::encode(scratch_.code());
}
};
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_


@ -1009,7 +1009,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ SmiTag(eax);
__ push(eax); // Enumeration cache length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
@ -1019,7 +1018,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
__ push(eax);
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ SmiTag(eax);
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.


@ -304,7 +304,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
@ -329,18 +329,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
__ mov(ebx, eax);
__ SmiUntag(ebx);
// Get the elements array of the object.
__ bind(&index_int);
__ bind(&index_smi);
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
// Check that the key (index) is within bounds.
__ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Fast case: Do the load.
__ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
__ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@ -352,9 +351,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_pixel_array);
// Check whether the elements is a pixel array.
// edx: receiver
// ebx: untagged index
// eax: key
// ecx: elements
__ mov(ebx, eax);
__ SmiUntag(ebx);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
@ -485,9 +485,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
__ and_(ebx, String::kArrayIndexHashMask);
__ shr(ebx, String::kHashShift);
__ jmp(&index_int);
// We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(ebx, String::kArrayIndexValueMask);
__ shr(ebx, String::kHashShift - kSmiTagSize);
__ mov(eax, ebx);
__ jmp(&index_smi);
}
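The single shift above works because the masked hash field has no bits below kHashShift, so shifting right by kHashShift - kSmiTagSize yields the index already smi-tagged. A small sketch with assumed constants (the real ones are defined on String):

  #include <cassert>
  #include <cstdint>

  // Assumed values for illustration only.
  const int kSmiTagSizeSketch = 1;
  const int kHashShiftSketch = 2;
  const uint32_t kArrayIndexValueMaskSketch = ~0u << kHashShiftSketch;  // low bits zero

  inline uint32_t SmiTaggedArrayIndexSketch(uint32_t hash_field) {
    uint32_t masked = hash_field & kArrayIndexValueMaskSketch;
    uint32_t one_shift = masked >> (kHashShiftSketch - kSmiTagSizeSketch);
    uint32_t two_steps = (masked >> kHashShiftSketch) << kSmiTagSizeSketch;
    assert(one_shift == two_steps);  // same smi-tagged index either way
    return one_shift;
  }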
@ -792,9 +796,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
__ mov(ebx, Operand(ecx));
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast, taken);
// Slow case: call runtime.
@ -804,7 +806,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
// eax: value
// ecx: key
// ecx: key (a smi)
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
@ -840,13 +842,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
__ mov(ebx, ecx);
__ SmiUntag(ebx); // untag
__ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow, not_taken);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(1 << kSmiTagSize));
Immediate(Smi::FromInt(1)));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS


@ -60,49 +60,17 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
Label fast;
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
and_(object, ~Page::kPageAlignmentMask);
Register page_start = object;
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
sub(addr, Operand(page_start));
shr(addr, kObjectAlignmentBits);
Register pointer_offset = addr;
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
and_(addr, Page::kPageAlignmentMask);
shr(addr, Page::kRegionSizeLog2);
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
cmp(pointer_offset, Page::kPageSize / kPointerSize);
j(less, &fast);
// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.
// Find the length of the large object (FixedArray).
mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
Register array_length = scratch;
// Extra remembered set starts right after the large object (a FixedArray), at
// page_start + kObjectStartOffset + objectSize
// where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
- Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bind(&fast);
bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
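In C++ terms the rewritten helper boils down to the following address arithmetic; the page and region sizes below are assumptions used only for illustration (the real constants are defined on Page):

  #include <cstdint>

  const uintptr_t kPageAlignmentMaskSketch = (1u << 13) - 1;  // assumed 8K pages
  const int kRegionSizeLog2Sketch = 8;                        // assumed 256-byte regions

  // Set the dirty bit for the region of the page that covers 'slot'.
  inline void MarkRegionDirtySketch(uintptr_t object, uintptr_t slot,
                                    uint32_t* page_dirty_marks) {
    uintptr_t page_start = object & ~kPageAlignmentMaskSketch;      // and_(object, ...)
    uintptr_t region = (slot & kPageAlignmentMaskSketch)            // and_(addr, ...)
                       >> kRegionSizeLog2Sketch;                    // shr(addr, ...)
    (void)page_start;  // the real code reaches the mark word through page_start
    *page_dirty_marks |= (1u << region);                            // bts(...)
  }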
@ -130,7 +98,7 @@ void MacroAssembler::InNewSpace(Register object,
}
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@ -142,9 +110,8 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
// Skip barrier if writing a smi.
@ -160,47 +127,19 @@ void MacroAssembler::RecordWrite(Register object, int offset,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
// We use optimized write barrier code if the word being written to is not in
// a large object chunk or is in the first page of a large object chunk.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
lea(value, Operand(object, offset));
and_(value, Page::kPageAlignmentMask);
shr(value, kPointerSizeLog2);
// Compute the page address from the heap object pointer, leave it in
// 'object'.
and_(object, ~Page::kPageAlignmentMask);
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bts(Operand(object, Page::kRSetOffset), value);
Register dst = scratch;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
Register dst = scratch;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
RecordWriteHelper(object, dst, value);
} else {
RecordWriteStub stub(object, dst, value);
CallStub(&stub);
}
// Array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
RecordWriteHelper(object, dst, value);
bind(&done);
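A worked version of the smi-index addressing used in the array-access branch above (32-bit pointers assumed, header size illustrative): a smi-tagged index is the raw index shifted left by one, so scaling it by half a pointer size gives exactly index * kPointerSize.

  #include <cstdint>

  const int kPointerSizeSketch = 4;           // ia32 assumed
  const int kSmiTagSizeSketch = 1;
  const int kFixedArrayHeaderSizeSketch = 8;  // illustrative, not the real constant
  const int kHeapObjectTagSketch = 1;

  // Byte address of element 'index' of a tagged elements array.
  inline uintptr_t ElementSlotSketch(uintptr_t tagged_elements, int index) {
    intptr_t smi_index = static_cast<intptr_t>(index) << kSmiTagSizeSketch;
    // times_half_pointer_size: smi_index * 2 == index * kPointerSize here.
    return tagged_elements + smi_index * (kPointerSizeSketch / 2)
           + kFixedArrayHeaderSizeSketch - kHeapObjectTagSketch;
  }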
@ -1384,6 +1323,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize));


@ -59,8 +59,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
// Set the remebered set bit for an address which points into an
// object. RecordWriteHelper only works if the object is not in new
// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@ -73,7 +73,7 @@ class MacroAssembler: public Assembler {
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.


@ -1173,7 +1173,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@ -1183,7 +1183,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiTag(ecx);
// Check if we could survive without allocation.
__ cmp(eax, Operand(ecx));
@ -1201,17 +1200,16 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_rset_update);
__ j(not_zero, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_rset_update);
__ bind(&with_write_barrier);
__ InNewSpace(ebx, ecx, equal, &exit);
RecordWriteStub stub(ebx, edx, ecx);
__ CallStub(&stub);
__ RecordWriteHelper(ebx, edx, ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@ -1251,10 +1249,10 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
Immediate(kAllocationDelta));
Immediate(Smi::FromInt(kAllocationDelta)));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
// Elements are in new space, so no remembered set updates are necessary.
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);


@ -84,9 +84,6 @@ void MarkCompactCollector::CollectGarbage() {
UpdatePointers();
RelocateObjects();
RebuildRSets();
} else {
SweepSpaces();
}
@ -121,14 +118,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG
if (compacting_collection_) {
// We will write bookkeeping information to the remembered set area
// starting now.
Page::set_rset_state(Page::NOT_IN_USE);
}
#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@ -150,7 +139,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
void MarkCompactCollector::Finish() {
#ifdef DEBUG
ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
// The stub cache is not traversed during GC; clear the cache to
@ -244,8 +233,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
// Since we don't have the object's start, it is impossible to update the
// remembered set. Therefore, we only replace the string with its left
// substring when the remembered set does not change.
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
@ -776,6 +765,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
Heap::lo_space()->FreeUnmarkedObjects();
}
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
MapWord metamap = object->map_word();
@ -783,6 +773,7 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
// Iterate over the map space, setting map transitions that go from
@ -1078,13 +1069,18 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
// first word of object without any encoding. If object is dead we are writing
// NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of remembered set for
// map space because remembered set bits corresponding to dead maps are cleared
// later during map space sweeping.
static void MigrateObject(Address dst, Address src, int size) {
Heap::CopyBlock(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
size);
// to encounter pointers to dead objects during traversal of dirty regions;
// we should clear them to avoid encountering them during the next dirty
// regions iteration.
static void MigrateObject(Address dst,
Address src,
int size,
bool to_old_space) {
if (to_old_space) {
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
} else {
Heap::CopyBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
}
@ -1131,6 +1127,7 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
}
};
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
@ -1142,10 +1139,13 @@ static void UpdatePointerToNewGen(HeapObject** p) {
Address new_addr = Memory::Address_at(old_addr);
// Object pointed by *p is dead. Update is not required.
if (new_addr == NULL) return;
*p = HeapObject::FromAddress(new_addr);
if (new_addr == NULL) {
// We encountered pointer to a dead object. Clear it so we will
// not visit it again during next iteration of dirty regions.
*p = NULL;
} else {
*p = HeapObject::FromAddress(new_addr);
}
}
@ -1163,8 +1163,7 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
Heap::UpdateRSet(target);
MigrateObject(target->address(), object->address(), object_size, true);
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@ -1177,10 +1176,10 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
if (target_space == Heap::old_pointer_space()) {
Heap::UpdateRSet(target);
}
MigrateObject(target->address(),
object->address(),
object_size,
target_space == Heap::old_pointer_space());
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@ -1222,14 +1221,16 @@ static void SweepNewSpace(NewSpace* space) {
continue;
}
// Promotion either failed or not required.
// Copy the content of the object.
// Promotion failed. Just migrate object to another semispace.
Object* target = space->AllocateRaw(size);
// Allocation cannot fail at this point: semispaces are of equal size.
ASSERT(!target->IsFailure());
MigrateObject(HeapObject::cast(target)->address(), current, size);
MigrateObject(HeapObject::cast(target)->address(),
current,
size,
false);
} else {
size = object->Size();
Memory::Address_at(current) = NULL;
@ -1255,9 +1256,12 @@ static void SweepNewSpace(NewSpace* space) {
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
// Update pointers in old spaces.
Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
Heap::IterateDirtyRegions(Heap::old_pointer_space(),
&Heap::IteratePointersInDirtyRegion,
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);
Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
HeapObjectIterator cell_iterator(Heap::cell_space());
@ -1323,7 +1327,10 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start, static_cast<int>(current - free_start), true);
dealloc(free_start,
static_cast<int>(current - free_start),
true,
false);
is_previous_alive = true;
}
} else {
@ -1353,7 +1360,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
dealloc(free_start, size_in_bytes, false);
dealloc(free_start, size_in_bytes, false, true);
}
}
} else {
@ -1367,7 +1374,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If there is a free ending area on one of the previous pages we have to
// deallocate that area and put it on the free list.
if (last_free_size > 0) {
dealloc(last_free_start, last_free_size, true);
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
dealloc(last_free_start, last_free_size, true, true);
last_free_start = NULL;
last_free_size = 0;
}
@ -1398,7 +1407,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
dealloc(last_free_start, last_free_size, false);
dealloc(last_free_start, last_free_size, false, true);
new_allocation_top = last_free_start;
}
@ -1421,34 +1430,36 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
Heap::ClearRSetRange(start, size_in_bytes);
bool add_to_freelist,
bool last_on_page) {
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
@ -1458,13 +1469,13 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
bool add_to_freelist,
bool last_on_page) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
@ -1563,20 +1574,6 @@ class MapCompact {
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}
void FinishMapSpace() {
// Iterate through to space and finish move.
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
ASSERT(o != NULL);
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
ASSERT(map->IsMap());
Heap::UpdateRSet(map);
}
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());
@ -1669,9 +1666,9 @@ class MapCompact {
ASSERT(Map::kSize % 4 == 0);
Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
reinterpret_cast<Object**>(map_to_evacuate->address()),
Map::kSize);
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
map_to_evacuate->address(),
Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
@ -1756,6 +1753,12 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);
int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
@ -1766,7 +1769,6 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
map_compact.FinishMapSpace();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@ -2039,9 +2041,8 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
Page* forwarded_page = Page::FromAddress(first_forwarded);
int forwarded_offset = forwarded_page->Offset(first_forwarded);
// Find end of allocation of in the page of first_forwarded.
Address mc_top = forwarded_page->mc_relocation_top;
int mc_top_offset = forwarded_page->Offset(mc_top);
// Find end of allocation in the page of first_forwarded.
int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
// Check if current object's forward pointer is in the same page
// as the first live object's forwarding pointer
@ -2058,7 +2059,7 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
offset += Page::kObjectStartOffset;
ASSERT_PAGE_OFFSET(offset);
ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
return next_page->OffsetToAddress(offset);
}
@ -2103,16 +2104,12 @@ void MarkCompactCollector::RelocateObjects() {
// Flip from and to spaces
Heap::new_space()->Flip();
Heap::new_space()->MCCommitRelocationInfo();
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
Heap::new_space()->MCCommitRelocationInfo();
#ifdef DEBUG
// It is safe to write to the remembered sets as remembered sets on a
// page-by-page basis after committing the m-c forwarding pointer.
Page::set_rset_state(Page::IN_USE);
#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
@ -2139,9 +2136,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
Map::kSize);
Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
Map::kSize);
}
#ifdef DEBUG
@ -2198,9 +2195,13 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
if (space == Heap::old_data_space()) {
Heap::MoveBlock(new_addr, old_addr, obj_size);
} else {
Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@ -2245,9 +2246,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
Heap::MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@ -2283,9 +2282,13 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
// New and old addresses cannot overlap.
Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
Heap::CopyBlock(new_addr, old_addr, obj_size);
} else {
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
#ifdef DEBUG
if (FLAG_gc_verbose) {
@ -2302,18 +2305,6 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
// -------------------------------------------------------------------------
// Phase 5: rebuild remembered sets
void MarkCompactCollector::RebuildRSets() {
#ifdef DEBUG
ASSERT(state_ == RELOCATE_OBJECTS);
state_ = REBUILD_RSETS;
#endif
Heap::RebuildRSets();
}
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {


@ -41,7 +41,8 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// no attempt to add area to free list is made.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
// Forward declarations.
@ -131,8 +132,7 @@ class MarkCompactCollector: public AllStatic {
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
RELOCATE_OBJECTS,
REBUILD_RSETS
RELOCATE_OBJECTS
};
// The current stage of the collector.
@ -269,22 +269,22 @@ class MarkCompactCollector: public AllStatic {
// written to their map word's offset in the inactive
// semispace.
//
// Bookkeeping data is written to the remembered-set are of
// Bookkeeping data is written to the page header of
// each paged-space page that contains live objects after
// compaction:
//
// The 3rd word of the page (first word of the remembered
// set) contains the relocation top address, the address of
// the first word after the end of the last live object in
// the page after compaction.
// The allocation watermark field is used to track the
// relocation top address, the address of the first word
// after the end of the last live object in the page after
// compaction.
//
// The 4th word contains the zero-based index of the page in
// its space. This word is only used for map space pages, in
// The Page::mc_page_index field contains the zero-based index of the
// page in its space. This word is only used for map space pages, in
// order to encode the map addresses in 21 bits to free 11
// bits per map word for the forwarding address.
//
// The 5th word contains the (nonencoded) forwarding address
// of the first live object in the page.
// The Page::mc_first_forwarded field contains the (nonencoded)
// forwarding address of the first live object in the page.
//
// In both the new space and the paged spaces, a linked list
// of live regions is constructed (linked through
@ -319,23 +319,28 @@ class MarkCompactCollector: public AllStatic {
// generation.
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
static void DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
bool add_to_freelist,
bool last_on_page);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
@ -349,9 +354,7 @@ class MarkCompactCollector: public AllStatic {
//
// After: All pointers in live objects, including encoded map
// pointers, are updated to point to their target's new
// location. The remembered set area of each paged-space
// page containing live objects still contains bookkeeping
// information.
// location.
friend class UpdatingVisitor; // helper for updating visited objects
@ -373,13 +376,9 @@ class MarkCompactCollector: public AllStatic {
// Phase 4: Relocating objects.
//
// Before: Pointers to live objects are updated to point to their
// target's new location. The remembered set area of each
// paged-space page containing live objects still contains
// bookkeeping information.
// target's new location.
//
// After: Objects have been moved to their new addresses. The
// remembered set area of each paged-space page containing
// live objects still contains bookkeeping information.
// After: Objects have been moved to their new addresses.
// Relocates objects in all spaces.
static void RelocateObjects();
@ -408,17 +407,6 @@ class MarkCompactCollector: public AllStatic {
// Copy a new object.
static int RelocateNewObject(HeapObject* obj);
// -----------------------------------------------------------------------
// Phase 5: Rebuilding remembered sets.
//
// Before: The heap is in a normal state except that remembered sets
// in the paged spaces are not correct.
//
// After: The heap is in a normal state.
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes


@ -806,7 +806,8 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
CHECK_EQ(0, elements()->length());
CHECK(HasFastElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}


@ -759,7 +759,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
ASSERT(mode == SKIP_WRITE_BARRIER); \
ASSERT(Heap::InNewSpace(object) || \
!Heap::InNewSpace(READ_FIELD(object, offset)) || \
Page::IsRSetSet(object->address(), offset)); \
Page::FromAddress(object->address())-> \
IsRegionDirty(object->address() + offset)); \
}
#define READ_DOUBLE_FIELD(p, offset) \
@ -1045,6 +1046,10 @@ Address MapWord::ToEncodedAddress() {
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
ASSERT(READ_FIELD(this, offset)->IsSmi());
}
#endif
@ -1064,7 +1069,7 @@ MapWord HeapObject::map_word() {
void HeapObject::set_map_word(MapWord map_word) {
// WRITE_FIELD does not update the remembered set, but there is no need
// WRITE_FIELD does not invoke write barrier, but there is no need
// here.
WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
@ -1162,16 +1167,16 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
Array* JSObject::elements() {
HeapObject* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
// In the assert below Dictionary is covered under FixedArray.
ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
array->IsExternalArray());
return reinterpret_cast<Array*>(array);
return reinterpret_cast<HeapObject*>(array);
}
void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@ -1342,15 +1347,15 @@ bool JSObject::HasFastProperties() {
}
bool Array::IndexFromObject(Object* object, uint32_t* index) {
if (object->IsSmi()) {
int value = Smi::cast(object)->value();
bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
if (value < 0) return false;
*index = value;
return true;
}
if (object->IsHeapNumber()) {
double value = HeapNumber::cast(object)->value();
if (IsHeapNumber()) {
double value = HeapNumber::cast(this)->value();
uint32_t uint_value = static_cast<uint32_t>(value);
if (value == static_cast<double>(uint_value)) {
*index = uint_value;
@ -1665,7 +1670,11 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
}
INT_ACCESSORS(Array, length, kLengthOffset)
SMI_ACCESSORS(FixedArray, length, kLengthOffset)
SMI_ACCESSORS(ByteArray, length, kLengthOffset)
INT_ACCESSORS(PixelArray, length, kLengthOffset)
INT_ACCESSORS(ExternalArray, length, kLengthOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
@ -1678,6 +1687,9 @@ uint32_t String::hash_field() {
void String::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
#endif
}
@ -2456,22 +2468,65 @@ BOOL_ACCESSORS(SharedFunctionInfo,
try_full_codegen,
kTryFullCodegen)
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
INT_ACCESSORS(SharedFunctionInfo, function_token_position,
SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
INT_ACCESSORS(SharedFunctionInfo, compiler_hints,
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
int holder::name() { \
int value = READ_INT_FIELD(this, offset); \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & kHeapObjectTag) == 0); \
return value >> 1; \
} \
void holder::set_##name(int value) { \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & 0xC0000000) == 0xC0000000 || \
(value & 0xC0000000) == 0x000000000); \
WRITE_INT_FIELD(this, \
offset, \
(value << 1) & ~kHeapObjectTag); \
}
#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
INT_ACCESSORS(holder, name, offset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, end_position, kEndPositionOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#endif
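A rough round-trip illustration of the PSEUDO_SMI_ACCESSORS_LO encoding: the value is stored shifted left by one with the tag bit cleared, so a heap scan sees a smi-looking word instead of a pointer. kHeapObjectTag is 1 in V8; the range check mirrors the ASSERT in the macro, and everything else is a simplified sketch.

  #include <cassert>

  const int kHeapObjectTagSketch = 1;

  // Store: shift left by one and clear the tag bit, as the macro does.
  inline int WritePseudoSmiLoSketch(int value) {
    // Only values whose top two bits agree survive the shift unchanged
    // (mirrors the ASSERT in the macro).
    assert((value & 0xC0000000) == 0xC0000000 || (value & 0xC0000000) == 0);
    return static_cast<int>(static_cast<unsigned int>(value) << 1)
           & ~kHeapObjectTagSketch;
  }

  // Load: arithmetic shift right restores the original value.
  inline int ReadPseudoSmiLoSketch(int raw) {
    assert((raw & kHeapObjectTagSketch) == 0);  // looks like a smi, not a pointer
    return raw >> 1;
  }

  // Round trip: WritePseudoSmiLoSketch(5) == 10 and ReadPseudoSmiLoSketch(10) == 5.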
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@ -2785,7 +2840,7 @@ void JSRegExp::SetDataAt(int index, Object* value) {
JSObject::ElementsKind JSObject::GetElementsKind() {
Array* array = elements();
HeapObject* array = elements();
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
@ -2908,15 +2963,20 @@ NumberDictionary* JSObject::element_dictionary() {
}
bool String::IsHashFieldComputed(uint32_t field) {
return (field & kHashNotComputedMask) == 0;
}
bool String::HasHashCode() {
return (hash_field() & kHashComputedMask) != 0;
return IsHashFieldComputed(hash_field());
}
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
if (field & kHashComputedMask) return field >> kHashShift;
if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
}
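A minimal sketch of the flipped hash-field convention (constants assumed for illustration): the low bit now means "hash not computed", so a computed field is just the hash shifted into place, with no "computed" bit OR-ed in.

  #include <cstdint>

  const uint32_t kHashNotComputedMaskSketch = 1;
  const int kHashShiftSketch = 2;  // assumed; the real shift lives on String

  inline bool IsHashFieldComputedSketch(uint32_t field) {
    return (field & kHashNotComputedMaskSketch) == 0;
  }

  // A freshly created string can start with field == kHashNotComputedMaskSketch;
  // once the hash is known the field is simply the shifted hash.
  inline uint32_t ComputedHashFieldSketch(uint32_t hash) {
    return hash << kHashShiftSketch;  // low bit stays 0: "computed"
  }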
@ -2989,7 +3049,7 @@ uint32_t StringHasher::GetHash() {
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
if (IsHashFieldComputed(field) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}
@ -3113,7 +3173,7 @@ void Map::ClearCodeCache() {
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
Array* elts = elements();
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid


@ -4784,7 +4784,7 @@ static inline uint32_t HashSequentialString(const schar* chars, int length) {
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
ASSERT(!(hash_field() & kHashComputedMask));
ASSERT(!HasHashCode());
const int len = length();
@ -4803,7 +4803,7 @@ uint32_t String::ComputeAndSetHash() {
set_hash_field(field);
// Check the hash code is there.
ASSERT(hash_field() & kHashComputedMask);
ASSERT(HasHashCode());
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@ -4858,8 +4858,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
static inline uint32_t HashField(uint32_t hash,
bool is_array_index,
int length = -1) {
uint32_t result =
(hash << String::kHashShift) | String::kHashComputedMask;
uint32_t result = (hash << String::kHashShift);
if (is_array_index) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@ -5654,7 +5653,7 @@ Object* JSObject::SetElementsLength(Object* len) {
// General slow case.
if (len->IsNumber()) {
uint32_t length;
if (Array::IndexFromObject(len, &length)) {
if (len->ToArrayIndex(&length)) {
return SetSlowElements(len);
} else {
return ArrayLengthRangeError();
@ -6078,8 +6077,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&array_length));
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
if (index >= array_length) {
JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
@ -6217,8 +6215,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
if (ShouldConvertToFastElements()) {
uint32_t new_length = 0;
if (IsJSArray()) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&new_length));
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
@ -6249,7 +6246,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
uint32_t old_len = 0;
CHECK(Array::IndexFromObject(length(), &old_len));
CHECK(length()->ToArrayIndex(&old_len));
// Check to see if we need to update the length. For now, we make
// sure that the length stays within 32-bits (unsigned).
if (index >= old_len && index != 0xffffffff) {
@ -6531,7 +6528,7 @@ bool JSObject::ShouldConvertToFastElements() {
// fast elements.
uint32_t length = 0;
if (IsJSArray()) {
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
} else {
length = dictionary->max_number_key();
}


@ -54,29 +54,28 @@
// - JSGlobalObject
// - JSBuiltinsObject
// - JSGlobalProxy
// - JSValue
// - Array
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - JSValue
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - String
// - SeqString
// - SeqAsciiString
@ -676,6 +675,10 @@ class Object BASE_EMBEDDED {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype();
// Tries to convert an object to an array index. Returns true and sets
// the output parameter if it succeeds.
inline bool ToArrayIndex(uint32_t* index);
// Returns true if this is a JSValue containing a string and the index is
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
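As a rough illustration of the conversion rule that ToArrayIndex() expresses (the helper below is a hypothetical stand-in, not V8's implementation, and simplifies the exact range checks): a value qualifies as an array index only if it is a non-negative whole number representable as a uint32.

#include <cassert>
#include <cmath>
#include <cstdint>

// Hypothetical stand-in for Object::ToArrayIndex(): succeeds only for
// non-negative whole numbers that fit into an unsigned 32-bit integer.
static bool ToArrayIndex(double value, uint32_t* index) {
  if (value < 0 || std::floor(value) != value) return false;  // negative or fractional
  if (value > 4294967295.0) return false;                     // does not fit in uint32
  *index = static_cast<uint32_t>(value);
  return true;
}

int main() {
  uint32_t i = 0;
  assert(ToArrayIndex(42.0, &i) && i == 42);
  assert(!ToArrayIndex(-1.0, &i));   // negative numbers are rejected
  assert(!ToArrayIndex(3.5, &i));    // fractional numbers are rejected
  return 0;
}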
@ -1026,7 +1029,7 @@ class HeapObject: public Object {
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
// Does not update remembered sets, so should only be assigned to
// Does not invoke write barrier, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
@ -1046,6 +1049,7 @@ class HeapObject: public Object {
void HeapObjectPrint();
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
void PrintHeader(const char* id);
@ -1150,7 +1154,7 @@ class JSObject: public HeapObject {
};
// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case, and a Dictionary in the
// properties is a FixedArray in the fast case and a Dictionary in the
// slow case.
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
@ -1158,9 +1162,9 @@ class JSObject: public HeapObject {
inline StringDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
// elements is a FixedArray in the fast case, and a Dictionary in the slow
// case or a PixelArray in a special case.
DECL_ACCESSORS(elements, Array) // Get and set fast elements.
// elements is a FixedArray in the fast case, a Dictionary in the slow
// case, and a PixelArray or ExternalArray in special cases.
DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
@ -1594,37 +1598,13 @@ class JSObject: public HeapObject {
};
// Abstract super class for arrays. It provides length behavior.
class Array: public HeapObject {
// FixedArray describes fixed-sized arrays with element type Object*.
class FixedArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// Convert an object to an array index.
// Returns true if the conversion succeeded.
static inline bool IndexFromObject(Object* object, uint32_t* index);
// Layout descriptor.
static const int kLengthOffset = HeapObject::kHeaderSize;
protected:
// No code should use the Array class directly, only its subclasses.
// Use the kHeaderSize of the appropriate subclass, which may be aligned.
static const int kHeaderSize = kLengthOffset + kIntSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
};
// FixedArray describes fixed sized arrays where element
// type is Object*.
class FixedArray: public Array {
public:
// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@ -1665,7 +1645,10 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
static const int kHeaderSize = Array::kAlignedSize;
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
@ -2364,8 +2347,12 @@ class JSFunctionResultCache: public FixedArray {
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
class ByteArray: public Array {
class ByteArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@ -2374,7 +2361,7 @@ class ByteArray: public Array {
inline int get_int(int index);
static int SizeFor(int length) {
return OBJECT_SIZE_ALIGN(kHeaderSize + length);
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
// We use byte arrays for free blocks in the heap. Given a desired size in
// bytes that is a multiple of the word size and big enough to hold a byte
@ -2402,9 +2389,12 @@ class ByteArray: public Array {
void ByteArrayVerify();
#endif
// ByteArray headers are not quadword aligned.
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
// Maximal memory consumption for a single ByteArray.
static const int kMaxSize = 512 * MB;
@ -2423,8 +2413,12 @@ class ByteArray: public Array {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
class PixelArray: public Array {
class PixelArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// [external_pointer]: The pointer to the external memory area backing this
// pixel array.
DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
@ -2449,9 +2443,11 @@ class PixelArray: public Array {
static const int kMaxLength = 0x3fffffff;
// PixelArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kExternalPointerOffset =
POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
@ -2469,8 +2465,12 @@ class PixelArray: public Array {
// Out-of-range values passed to the setter are converted via a C
// cast, not clamping. Out-of-range indices cause exceptions to be
// raised rather than being silently ignored.
class ExternalArray: public Array {
class ExternalArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
// [external_pointer]: The pointer to the external memory area backing this
// external array.
DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
@ -2482,9 +2482,11 @@ class ExternalArray: public Array {
static const int kMaxLength = 0x3fffffff;
// ExternalArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kExternalPointerOffset =
POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
@ -3038,7 +3040,13 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_SIZE_ALIGN(kPadStart);
static const int kSize = MAP_POINTER_ALIGN(kPadStart);
// Layout of pointer fields. Heap iteration code relies on them
// being contiguously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
Map::kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@ -3350,23 +3358,64 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInferredNameOffset + kPointerSize;
// Integer fields.
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
static const int kNumLiteralsOffset =
kExpectedNofPropertiesOffset + kPointerSize;
static const int kStartPositionAndTypeOffset =
kNumLiteralsOffset + kPointerSize;
static const int kEndPositionOffset =
kStartPositionAndTypeOffset + kPointerSize;
static const int kFunctionTokenPositionOffset =
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kPointerSize;
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
// garbage collections.
// To avoid wasting space on 64-bit architectures we use
// the following trick: we group integer fields into pairs.
// The first integer in each pair is shifted left by 1.
// By doing this we guarantee that the LSB of each kPointerSize-aligned
// word is not set and thus this word cannot be treated as a pointer
// to a HeapObject during old space traversal.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
static const int kStartPositionAndTypeOffset =
static const int kNumLiteralsOffset =
kExpectedNofPropertiesOffset + kIntSize;
static const int kEndPositionOffset =
kNumLiteralsOffset + kIntSize;
static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
static const int kStartPositionAndTypeOffset =
kEndPositionOffset + kIntSize;
static const int kFunctionTokenPositionOffset =
kStartPositionAndTypeOffset + kIntSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kIntSize;
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
private:
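The pairing trick used in the 64-bit branch above can be illustrated in isolation. This is a minimal sketch (the PackedPair type and its accessors are invented for illustration and assume a little-endian layout, as on x64), showing that storing the first value of each pair shifted left by one keeps the LSB of the shared pointer-aligned word clear, so old-space iteration can never mistake that word for a tagged HeapObject pointer.

#include <cassert>
#include <cstdint>
#include <cstring>

// Two 32-bit fields packed into one 64-bit, pointer-aligned word.
// The lower-addressed field is stored shifted left by 1 so that, on a
// little-endian machine, bit 0 of the whole word is always clear.
struct PackedPair {
  int32_t first_shifted;  // holds (value << 1)
  int32_t second;         // plain value in the upper half of the word

  void set_first(int32_t v) { first_shifted = v << 1; }
  int32_t first() const { return first_shifted >> 1; }
};

int main() {
  PackedPair p;
  p.set_first(42);
  p.second = 7;
  assert(p.first() == 42 && p.second == 7);

  uint64_t word = 0;
  static_assert(sizeof(p) == sizeof(word), "pair occupies one 64-bit word");
  std::memcpy(&word, &p, sizeof(word));  // view the pair as one aligned word
  assert((word & 1) == 0);               // LSB clear: not a tagged pointer
  return 0;
}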
@ -4122,8 +4171,7 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHashFieldOffset = kLengthOffset + kPointerSize;
static const int kSize = kHashFieldOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.
static const int kSize = kHashFieldOffset + kPointerSize;
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
@ -4142,7 +4190,7 @@ class String: public HeapObject {
// whether a hash code has been computed. If the hash code has been
// computed the 2nd bit tells whether the string can be used as an
// array index.
static const int kHashComputedMask = 1;
static const int kHashNotComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;
@ -4160,9 +4208,14 @@ class String: public HeapObject {
static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
static const int kArrayIndexValueBits =
kArrayIndexHashLengthShift - kHashShift;
static const int kArrayIndexValueMask =
((1 << kArrayIndexValueBits) - 1) << kHashShift;
// Value of empty hash field indicating that the hash is not computed.
static const int kEmptyHashField = 0;
static const int kEmptyHashField = kHashNotComputedMask;
// Value of hash field containing computed hash equal to zero.
static const int kZeroHash = 0;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
@ -4230,6 +4283,8 @@ class String: public HeapObject {
// mutates the ConsString and might return a failure.
Object* SlowTryFlatten(PretenureFlag pretenure);
static inline bool IsHashFieldComputed(uint32_t field);
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
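Since the meaning of the hash field's LSB is now inverted (1 means the hash has not been computed), the "computed" check tests for a cleared bit, and a legitimately zero hash (kZeroHash) stays distinguishable from the empty field. A small self-contained sketch of this bit layout, assuming IsHashFieldComputed simply tests the mask (a plausible reading of the declarations above, not necessarily the exact V8 body):

#include <cassert>
#include <cstdint>

// Constants as declared above: bit 0 set means "hash not computed".
const uint32_t kHashNotComputedMask = 1;
const uint32_t kEmptyHashField = kHashNotComputedMask;  // no hash yet
const uint32_t kZeroHash = 0;                           // computed hash == 0

// Hypothetical body for the IsHashFieldComputed helper declared above.
static bool IsHashFieldComputed(uint32_t field) {
  return (field & kHashNotComputedMask) == 0;
}

int main() {
  assert(!IsHashFieldComputed(kEmptyHashField));  // fresh string: no hash yet
  assert(IsHashFieldComputed(kZeroHash));         // hash happens to be zero
  return 0;
}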
@ -4279,7 +4334,7 @@ class SeqAsciiString: public SeqString {
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
// Layout description.
@ -4331,7 +4386,7 @@ class SeqTwoByteString: public SeqString {
// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
}
// Layout description.

View File

@ -291,7 +291,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE);
} else if (Array::IndexFromObject(*key, &element_index)) {
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = SetElement(boilerplate, element_index, value);
} else {
@ -1583,7 +1583,7 @@ static Object* Runtime_SetCode(Arguments args) {
static Object* CharCodeAt(String* subject, Object* index) {
uint32_t i = 0;
if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
if (!index->ToArrayIndex(&i)) return Heap::nan_value();
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
@ -1599,7 +1599,7 @@ static Object* CharCodeAt(String* subject, Object* index) {
static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (Array::IndexFromObject(char_code, &code)) {
if (char_code->ToArrayIndex(&code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
@ -2780,7 +2780,7 @@ static Object* Runtime_StringIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
int position = Runtime::StringMatch(sub, pat, start_index);
@ -2830,7 +2830,7 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
uint32_t pat_length = pat->length();
uint32_t sub_length = sub->length();
@ -3657,7 +3657,7 @@ Object* Runtime::GetObjectProperty(Handle<Object> object, Handle<Object> key) {
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
return GetElementOrCharAt(object, index);
}
@ -3843,7 +3843,7 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@ -3895,7 +3895,7 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@ -3942,7 +3942,7 @@ Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
if (Array::IndexFromObject(*key, &index)) {
if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@ -4355,7 +4355,7 @@ static Object* Runtime_GetArgumentsProperty(Arguments args) {
// Try to convert the key to an index. If successful and within
// index return the the argument from the frame.
uint32_t index;
if (Array::IndexFromObject(args[0], &index) && index < n) {
if (args[0]->ToArrayIndex(&index) && index < n) {
return frame->GetParameter(index);
}
@ -6457,8 +6457,8 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
if (obj->IsFailure()) return obj;
AssertNoAllocation no_gc;
reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
FixedArray* array = FixedArray::cast(obj);
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
array->set_map(Heap::fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@ -7747,8 +7747,8 @@ static Object* Runtime_SwapElements(Arguments args) {
Handle<Object> key2 = args.at<Object>(2);
uint32_t index1, index2;
if (!Array::IndexFromObject(*key1, &index1)
|| !Array::IndexFromObject(*key2, &index2)) {
if (!key1->ToArrayIndex(&index1)
|| !key2->ToArrayIndex(&index2)) {
return Top::ThrowIllegalOperation();
}
@ -7779,17 +7779,19 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
uint32_t index;
if (!Array::IndexFromObject(key, &index) || index >= length) {
if (!key->ToArrayIndex(&index) || index >= length) {
// Zap invalid keys.
keys->set_undefined(i);
}
}
return *Factory::NewJSArrayWithElements(keys);
} else {
ASSERT(array->HasFastElements());
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
uint32_t actual_length =
static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
Factory::NewNumber(static_cast<double>(min_length));

View File

@ -66,99 +66,173 @@ Address Page::AllocationTop() {
}
void Page::ClearRSet() {
// This method can be called in all rset states.
memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}
// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
// For a 64-bit address, if it is:
// | page address | words(5) | bit offset(5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.
Address Page::ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask) {
ASSERT(Page::is_rset_in_use());
Page* page = Page::FromAddress(address);
uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
kPointerSizeLog2);
*bitmask = 1 << (bit_offset % kBitsPerInt);
Address rset_address =
page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
// The remembered set address is either in the normal remembered set range
// of a page or else we have a large object page.
ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
|| page->IsLargeObjectPage());
if (rset_address >= page->RSetEnd()) {
// We have a large object page, and the remembered set address is actually
// past the end of the object.
// The first part of the remembered set is still located at the start of
// the page, but anything after kRSetEndOffset must be relocated to after
// the large object, i.e. after
// (page->ObjectAreaStart() + object size)
// We do that by adding the difference between the normal RSet's end and
// the object's end.
ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
int fixedarray_length =
FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+ Array::kLengthOffset));
rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
Address Page::AllocationWatermark() {
PagedSpace* owner = MemoryAllocator::PageOwner(this);
if (this == owner->AllocationTopPage()) {
return owner->top();
}
return rset_address;
return address() + AllocationWatermarkOffset();
}
void Page::SetRSet(Address address, int offset) {
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
Memory::uint32_at(rset_address) |= bitmask;
ASSERT(IsRSetSet(address, offset));
uint32_t Page::AllocationWatermarkOffset() {
return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
kAllocationWatermarkOffsetShift);
}
// Clears the corresponding remembered set bit for a given address.
void Page::UnsetRSet(Address address, int offset) {
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
Memory::uint32_at(rset_address) &= ~bitmask;
void Page::SetAllocationWatermark(Address allocation_watermark) {
if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
// When iterating intergenerational references during scavenge
// we might decide to promote an encountered young object.
// We will allocate space for such an object and put it
// into the promotion queue to process it later.
// If space for the object was allocated somewhere beyond the allocation
// watermark, this might cause garbage pointers to appear under the allocation
// watermark. To avoid visiting them during the dirty regions iteration,
// which might still be in progress, we store a valid allocation watermark
// value and mark this page as having an invalid watermark.
SetCachedAllocationWatermark(AllocationWatermark());
InvalidateWatermark(true);
}
ASSERT(!IsRSetSet(address, offset));
flags_ = (flags_ & kFlagsMask) |
Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
ASSERT(AllocationWatermarkOffset()
== static_cast<uint32_t>(Offset(allocation_watermark)));
}
bool Page::IsRSetSet(Address address, int offset) {
void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
mc_first_forwarded = allocation_watermark;
}
Address Page::CachedAllocationWatermark() {
return mc_first_forwarded;
}
uint32_t Page::GetRegionMarks() {
return dirty_regions_;
}
void Page::SetRegionMarks(uint32_t marks) {
dirty_regions_ = marks;
}
int Page::GetRegionNumberForAddress(Address addr) {
// Each page is divided into 256-byte regions. Each region has a corresponding
// dirty mark bit in the page header. A region can contain intergenerational
// references iff its dirty mark is set.
// A normal 8K page contains exactly 32 regions, so all region marks fit
// into a 32-bit integer field. To calculate a region number we just divide
// the offset inside the page by the region size.
// A large page can contain more than 32 regions. But we want to avoid
// additional write barrier code for distinguishing between large and normal
// pages, so we just ignore the fact that addr points into a large page and
// calculate the region number as if addr pointed into a normal 8K page. This
// way we get a region number modulo 32, so for large pages several regions
// might be mapped to a single dirty mark.
ASSERT_PAGE_ALIGNED(this->address());
STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
// We are using masking with kPageAlignmentMask instead of Page::Offset()
// to get an offset to the beginning of the 8K page containing addr, not to
// the beginning of the actual page, which can be bigger than 8K.
intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}
uint32_t Page::GetRegionMaskForAddress(Address addr) {
return 1 << GetRegionNumberForAddress(addr);
}
void Page::MarkRegionDirty(Address address) {
SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}
bool Page::IsRegionDirty(Address address) {
return GetRegionMarks() & GetRegionMaskForAddress(address);
}
void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
int rstart = GetRegionNumberForAddress(start);
int rend = GetRegionNumberForAddress(end);
if (reaches_limit) {
end += 1;
}
if ((rend - rstart) == 0) {
return;
}
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
return (Memory::uint32_at(rset_address) & bitmask) != 0;
if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
|| (start == ObjectAreaStart())) {
// The first region is fully covered.
bitmask = 1 << rstart;
}
while (++rstart < rend) {
bitmask |= 1 << rstart;
}
if (bitmask) {
SetRegionMarks(GetRegionMarks() & ~bitmask);
}
}
void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
}
bool Page::IsWatermarkValid() {
return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
}
void Page::InvalidateWatermark(bool value) {
if (value) {
flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
} else {
flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
(watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
}
ASSERT(IsWatermarkValid() == !value);
}
bool Page::GetPageFlag(PageFlag flag) {
return (flags & flag) != 0;
return (flags_ & flag) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
flags |= flag;
flags_ |= flag;
} else {
flags &= ~flag;
flags_ &= ~flag;
}
}
void Page::ClearPageFlags() {
flags_ = 0;
}
bool Page::WasInUseBeforeMC() {
return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}
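The region bookkeeping above reduces to a mask and a shift. The following standalone sketch mirrors the constants and arithmetic (simplified and hypothetical; it models only the address-to-dirty-bit mapping, not real Page objects):

#include <cassert>
#include <cstdint>

// Simplified mirrors of the Page constants used above.
const int kPageSizeBits = 13;                                    // 8K pages
const uintptr_t kPageAlignmentMask = (1u << kPageSizeBits) - 1;  // 0x1FFF
const int kRegionSizeLog2 = 8;                                   // 256-byte regions

// Offset of addr inside its (normal-sized) page, divided by the region size.
// For large pages this yields the region number modulo 32, so several
// regions can share one dirty bit, exactly as described above.
static int GetRegionNumberForAddress(uintptr_t addr) {
  uintptr_t offset_inside_normal_page = addr & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}

static uint32_t GetRegionMaskForAddress(uintptr_t addr) {
  return 1u << GetRegionNumberForAddress(addr);
}

int main() {
  uintptr_t page = 0x40000;  // some 8K-aligned page address
  assert(GetRegionNumberForAddress(page + 0) == 0);
  assert(GetRegionNumberForAddress(page + 255) == 0);    // same 256-byte region
  assert(GetRegionNumberForAddress(page + 256) == 1);    // next region
  assert(GetRegionNumberForAddress(page + 8191) == 31);  // last of 32 regions
  assert(GetRegionMaskForAddress(page + 300) == (1u << 1));
  return 0;
}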
@ -343,14 +417,6 @@ HeapObject* LargeObjectChunk::GetObject() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
int extra_rset_bits =
RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
kBitsPerInt);
return extra_rset_bits / kBitsPerByte;
}
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;

View File

@ -41,6 +41,7 @@ namespace internal {
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
// ----------------------------------------------------------------------------
// HeapObjectIterator
@ -138,13 +139,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
}
// -----------------------------------------------------------------------------
// Page
#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif
// -----------------------------------------------------------------------------
// CodeRange
@ -524,7 +518,10 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
p->InvalidateWatermark(true);
p->SetIsLargeObjectPage(false);
p->SetAllocationWatermark(p->ObjectAreaStart());
p->SetCachedAllocationWatermark(p->ObjectAreaStart());
page_addr += Page::kPageSize;
}
@ -681,6 +678,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
page_addr += Page::kPageSize;
p->InvalidateWatermark(true);
if (p->WasInUseBeforeMC()) {
*last_page_in_use = p;
}
@ -744,10 +742,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
// Sequentially initialize remembered sets in the newly allocated
// Sequentially clear region marks in the newly allocated
// pages and cache the current last page in the space.
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
p->ClearRSet();
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
}
@ -794,10 +792,10 @@ void PagedSpace::Unprotect() {
#endif
void PagedSpace::ClearRSet() {
void PagedSpace::MarkAllPagesClean() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
it.next()->ClearRSet();
it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
}
@ -900,7 +898,8 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
// of forwarding addresses is as an offset in terms of live bytes, so we
// need quick access to the allocation top of each page to decode
// forwarding addresses.
current_page->mc_relocation_top = mc_forwarding_info_.top;
current_page->SetAllocationWatermark(mc_forwarding_info_.top);
current_page->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}
@ -928,10 +927,10 @@ bool PagedSpace::Expand(Page* last_page) {
MemoryAllocator::SetNextPage(last_page, p);
// Sequentially clear remembered set of new pages and cache the
// Sequentially clear region marks of new pages and cache the
// new last page in the space.
while (p->is_valid()) {
p->ClearRSet();
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
p = p->next_page();
}
@ -1030,16 +1029,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
// Unless this is the last page in the space containing allocated
// objects, the allocation top should be at a constant offset from the
// object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == PageAllocationLimit(current_page));
}
// It should be packed with objects from the bottom to the top.
@ -1060,8 +1054,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
object->Verify();
// All the interior pointers should be contained in the heap and
// have their remembered set bits set if required as determined
// by the visitor.
// have their page regions covering intergenerational references
// marked dirty.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
@ -1120,7 +1114,7 @@ bool NewSpace::Setup(Address start, int size) {
start_ = start;
address_mask_ = ~(size - 1);
object_mask_ = address_mask_ | kHeapObjectTag;
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
allocation_info_.top = to_space_.low();
@ -1324,7 +1318,7 @@ bool SemiSpace::Setup(Address start,
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTag;
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
@ -1634,7 +1628,7 @@ void FreeListNode::set_size(int size_in_bytes) {
// If the block is too small (eg, one or two words), to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > ByteArray::kAlignedSize) {
if (size_in_bytes > ByteArray::kHeaderSize) {
set_map(Heap::raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
@ -1907,15 +1901,14 @@ void OldSpace::MCCommitRelocationInfo() {
Page* p = it.next();
// Space below the relocation pointer is allocated.
computed_size +=
static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
if (it.has_next()) {
// Free the space at the top of the page. We cannot use
// p->mc_relocation_top after the call to Free (because Free will clear
// remembered set bits).
// Free the space at the top of the page.
int extra_size =
static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
if (extra_size > 0) {
int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
extra_size);
// The bytes we have just "freed" to add to the free list were
// already accounted as available.
accounting_stats_.WasteBytes(wasted_bytes);
@ -1963,7 +1956,10 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
// Clean them up.
do {
first->ClearRSet();
first->InvalidateWatermark(true);
first->SetAllocationWatermark(first->ObjectAreaStart());
first->SetCachedAllocationWatermark(first->ObjectAreaStart());
first->SetRegionMarks(Page::kAllRegionsCleanMarks);
first = first->next_page();
} while (first != NULL);
@ -2003,6 +1999,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
// Current allocation top points to a page which is now in the middle
// of page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
@ -2035,6 +2032,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
p->SetAllocationWatermark(p->ObjectAreaStart());
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
}
}
@ -2066,6 +2064,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
if (!reserved_page->is_valid()) return false;
}
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&allocation_info_,
TopPageOf(allocation_info_)->next_page());
return true;
@ -2100,7 +2099,15 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
HeapObject* obj = HeapObject::cast(result);
Page* p = Page::FromAddress(obj->address());
if (obj->address() >= p->AllocationWatermark()) {
p->SetAllocationWatermark(obj->address() + size_in_bytes);
}
return obj;
}
}
@ -2123,6 +2130,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
@ -2133,6 +2141,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
// In the fixed space free list all the free list items have the right size.
@ -2152,6 +2161,7 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
current_page->next_page()->InvalidateWatermark(true);
PutRestOfCurrentPageOnFreeList(current_page);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
@ -2296,160 +2306,12 @@ void OldSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
// Report remembered set statistics.
int rset_marked_pointers = 0;
int rset_marked_arrays = 0;
int rset_marked_array_elements = 0;
int cross_gen_pointers = 0;
int cross_gen_array_elements = 0;
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
while (page_it.has_next()) {
Page* p = page_it.next();
for (Address rset_addr = p->RSetStart();
rset_addr < p->RSetEnd();
rset_addr += kIntSize) {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
int intoff =
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
int bitpos = intoff*kBitsPerByte + bitoff;
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
Object** obj = reinterpret_cast<Object**>(slot);
if (*obj == Heap::raw_unchecked_fixed_array_map()) {
rset_marked_arrays++;
FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
rset_marked_array_elements += fa->length();
// Manually inline FixedArray::IterateBody
Address elm_start = slot + FixedArray::kHeaderSize;
Address elm_stop = elm_start + fa->length() * kPointerSize;
for (Address elm_addr = elm_start;
elm_addr < elm_stop; elm_addr += kPointerSize) {
// Filter non-heap-object pointers
Object** elm_p = reinterpret_cast<Object**>(elm_addr);
if (Heap::InNewSpace(*elm_p))
cross_gen_array_elements++;
}
} else {
rset_marked_pointers++;
if (Heap::InNewSpace(*obj))
cross_gen_pointers++;
}
}
}
}
}
}
pct = rset_marked_pointers == 0 ?
0 : cross_gen_pointers * 100 / rset_marked_pointers;
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
rset_marked_pointers, cross_gen_pointers, pct);
PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
PrintF(" elements %d, ", rset_marked_array_elements);
pct = rset_marked_array_elements == 0 ? 0
: cross_gen_array_elements * 100 / rset_marked_array_elements;
PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
PrintF(" total rset-marked bits %d\n",
(rset_marked_pointers + rset_marked_arrays));
pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
: (cross_gen_pointers + cross_gen_array_elements) * 100 /
(rset_marked_pointers + rset_marked_array_elements);
PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
(rset_marked_pointers + rset_marked_array_elements),
(cross_gen_pointers + cross_gen_array_elements),
pct);
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(true);
}
// Dump the range of remembered set words between [start, end) corresponding
// to the pointers starting at object_p. The allocation_top is an object
// pointer which should not be read past. This is important for large object
// pages, where some bits in the remembered set range do not correspond to
// allocated addresses.
static void PrintRSetRange(Address start, Address end, Object** object_p,
Address allocation_top) {
Address rset_address = start;
// If the range starts on an odd-numbered word (e.g., for large object extra
// remembered set ranges), print some spaces.
if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
PrintF(" ");
}
// Loop over all the words in the range.
while (rset_address < end) {
uint32_t rset_word = Memory::uint32_at(rset_address);
int bit_position = 0;
// Loop over all the bits in the word.
while (bit_position < kBitsPerInt) {
if (object_p == reinterpret_cast<Object**>(allocation_top)) {
// Print a bar at the allocation pointer.
PrintF("|");
} else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
// Do not dereference object_p past the allocation pointer.
PrintF("#");
} else if ((rset_word & (1 << bit_position)) == 0) {
// Print a dot for zero bits.
PrintF(".");
} else if (Heap::InNewSpace(*object_p)) {
// Print an X for one bits for pointers to new space.
PrintF("X");
} else {
// Print a circle for one bits for pointers to old space.
PrintF("o");
}
// Print a space after every 8th bit except the last.
if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
PrintF(" ");
}
// Advance to next bit.
bit_position++;
object_p++;
}
// Print a newline after every odd numbered word, otherwise a space.
if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
PrintF("\n");
} else {
PrintF(" ");
}
// Advance to next remembered set word.
rset_address += kIntSize;
}
}
void PagedSpace::DoPrintRSet(const char* space_name) {
PageIterator it(this, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
PrintF("%s page 0x%x:\n", space_name, p);
PrintRSetRange(p->RSetStart(), p->RSetEnd(),
reinterpret_cast<Object**>(p->ObjectAreaStart()),
p->AllocationTop());
PrintF("\n");
}
}
void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif
// -----------------------------------------------------------------------------
@ -2499,6 +2361,7 @@ void FixedSpace::MCCommitRelocationInfo() {
if (it.has_next()) {
accounting_stats_.WasteBytes(
static_cast<int>(page->ObjectAreaEnd() - page_top));
page->SetAllocationWatermark(page_top);
}
}
@ -2528,7 +2391,14 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
return HeapObject::cast(result);
HeapObject* obj = HeapObject::cast(result);
Page* p = Page::FromAddress(obj->address());
if (obj->address() >= p->AllocationWatermark()) {
p->SetAllocationWatermark(obj->address() + size_in_bytes);
}
return obj;
}
}
@ -2558,6 +2428,8 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
ASSERT(current_page->next_page()->is_valid());
ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
current_page->next_page()->InvalidateWatermark(true);
current_page->SetAllocationWatermark(allocation_info_.top);
accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
@ -2570,51 +2442,12 @@ void FixedSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
// Report remembered set statistics.
int rset_marked_pointers = 0;
int cross_gen_pointers = 0;
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
while (page_it.has_next()) {
Page* p = page_it.next();
for (Address rset_addr = p->RSetStart();
rset_addr < p->RSetEnd();
rset_addr += kIntSize) {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
int intoff =
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
int bitpos = intoff*kBitsPerByte + bitoff;
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
Object** obj = reinterpret_cast<Object**>(slot);
rset_marked_pointers++;
if (Heap::InNewSpace(*obj))
cross_gen_pointers++;
}
}
}
}
}
pct = rset_marked_pointers == 0 ?
0 : cross_gen_pointers * 100 / rset_marked_pointers;
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
rset_marked_pointers, cross_gen_pointers, pct);
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(false);
}
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif
@ -2793,8 +2626,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
chunk->set_size(chunk_size);
first_chunk_ = chunk;
// Set the object address and size in the page header and clear its
// remembered set.
// Initialize page header.
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
Address object_address = page->ObjectAreaStart();
// Clear the low order bit of the second word in the page to flag it as a
@ -2802,13 +2634,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->SetIsLargeObjectPage(true);
page->ClearRSet();
int extra_bytes = requested_size - object_size;
if (extra_bytes > 0) {
// The extra memory for the remembered set should be cleared.
memset(object_address + object_size, 0, extra_bytes);
}
page->SetRegionMarks(Page::kAllRegionsCleanMarks);
return HeapObject::FromAddress(object_address);
}
@ -2823,8 +2649,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
return AllocateRawInternal(size_in_bytes,
size_in_bytes,
NOT_EXECUTABLE);
}
@ -2851,59 +2676,61 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
void LargeObjectSpace::ClearRSet() {
ASSERT(Page::is_rset_in_use());
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays need remembered set support.
if (object->IsFixedArray()) {
// Clear the normal remembered set region of the page;
Page* page = Page::FromAddress(object->address());
page->ClearRSet();
// Clear the extra remembered set.
int size = object->Size();
int extra_rset_bytes = ExtraRSetBytesFor(size);
memset(object->address() + size, 0, extra_rset_bytes);
}
}
}
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
static void* lo_rset_histogram = StatsTable::CreateHistogram(
"V8.RSetLO",
0,
// Keeping this histogram's buckets the same as the paged space histogram.
Page::kObjectAreaSize / kPointerSize,
30);
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
if (object->IsFixedArray()) {
// Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
Address object_end = object->address() + object->Size();
int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
Min(page->ObjectAreaEnd(), object_end),
page->RSetStart(),
copy_object_func);
uint32_t marks = page->GetRegionMarks();
uint32_t newmarks = Page::kAllRegionsCleanMarks;
// Iterate the extra array elements.
if (object_end > page->ObjectAreaEnd()) {
count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
object_end, copy_object_func);
}
if (lo_rset_histogram != NULL) {
StatsTable::AddHistogramSample(lo_rset_histogram, count);
if (marks != Page::kAllRegionsCleanMarks) {
// For a large page a single dirty mark corresponds to several
// regions (modulo 32). So we treat a large page as a sequence of
// normal pages of size Page::kPageSize having the same dirty marks
// and subsequently iterate dirty regions on each of these pages.
Address start = object->address();
Address end = page->ObjectAreaEnd();
Address object_end = start + object->Size();
// Iterate regions of the first normal page covering object.
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
newmarks |=
Heap::IterateDirtyRegions(marks >> first_region_number,
start,
end,
&Heap::IteratePointersInDirtyRegion,
copy_object) << first_region_number;
start = end;
end = start + Page::kPageSize;
while (end <= object_end) {
// Iterate next 32 regions.
newmarks |=
Heap::IterateDirtyRegions(marks,
start,
end,
&Heap::IteratePointersInDirtyRegion,
copy_object);
start = end;
end = start + Page::kPageSize;
}
if (start != object_end) {
// Iterate the last piece of an object which is less than
// Page::kPageSize.
newmarks |=
Heap::IterateDirtyRegions(marks,
start,
object_end,
&Heap::IteratePointersInDirtyRegion,
copy_object);
}
page->SetRegionMarks(newmarks);
}
}
}
@ -2995,7 +2822,7 @@ void LargeObjectSpace::Verify() {
} else if (object->IsFixedArray()) {
// We loop over fixed arrays ourselves, rather than using the visitor,
// because the visitor doesn't support the start/offset iteration
// needed for IsRSetSet.
// needed for IsRegionDirty.
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
@ -3004,8 +2831,11 @@ void LargeObjectSpace::Verify() {
ASSERT(Heap::Contains(element_object));
ASSERT(element_object->map()->IsMap());
if (Heap::InNewSpace(element_object)) {
ASSERT(Page::IsRSetSet(object->address(),
FixedArray::kHeaderSize + j * kPointerSize));
Address array_addr = object->address();
Address element_addr = array_addr + FixedArray::kHeaderSize +
j * kPointerSize;
ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
}
}
}
@ -3046,33 +2876,6 @@ void LargeObjectSpace::CollectCodeStatistics() {
}
}
}
void LargeObjectSpace::PrintRSet() {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
if (object->IsFixedArray()) {
Page* page = Page::FromAddress(object->address());
Address allocation_top = object->address() + object->Size();
PrintF("large page 0x%x:\n", page);
PrintRSetRange(page->RSetStart(), page->RSetEnd(),
reinterpret_cast<Object**>(object->address()),
allocation_top);
int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
kBitsPerInt);
PrintF("------------------------------------------------------------"
"-----------\n");
PrintRSetRange(allocation_top,
allocation_top + extra_rset_bits / kBitsPerByte,
reinterpret_cast<Object**>(object->address()
+ Page::kObjectAreaSize),
allocation_top);
PrintF("\n");
}
}
}
#endif // DEBUG
} } // namespace v8::internal

View File

@ -45,23 +45,46 @@ namespace internal {
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consists of a list of pages. A page has a page header, a remembered
// set area, and an object area. A page size is deliberately chosen as 8K
// bytes. The first word of a page is an opaque page header that has the
// spaces consist of a list of pages. A page has a page header and an object
// area. A page size is deliberately chosen as 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. The next 248 bytes are
// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
// remembered set bit corresponds to a pointer in the object area.
// have the allocation top address of this page. Heap objects are aligned to the
// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged and uses the same remembered
// set implementation. Pages in large object space may be larger than 8K.
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bits encoding see the comments in the
// Page::GetRegionNumberForAddress()
// method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every
// pointer-aligned word which satisfies the Heap::InNewSpace() predicate must
// be a pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to map space, which is iterated in
// a special fashion. However we still require pointer fields of dead maps to
// be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on the page should reside under
// the allocation watermark. During scavenge the allocation watermark might be
// bumped and invalid pointers might appear below it. To avoid following them
// we store a valid watermark into a special field in the page header and set
// the page's WATERMARK_INVALIDATED flag. For details see comments in the
// Page::SetAllocationWatermark() method body.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It reuses the first few words of the remembered set for
// bookkeeping relocation information.
// Some assertion macros used in the debugging mode.
@ -91,25 +114,13 @@ class AllocationInfo;
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size. A page is divided into
// three areas: the first two words are used for bookkeeping, the next 248
// bytes are used as remembered set, and the rest of the page is the object
// area.
// address is always aligned to the 8K page size.
//
// Pointers are aligned to the pointer size (4), only 1 bit is needed
// for a pointer in the remembered set. Given an address, its remembered set
// bit position (offset from the start of the page) is calculated by dividing
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
//
// On the 64-bit platform, we add an offset to the start of the remembered set,
// and pointers are aligned to 8-byte pointer size. This means that we need
// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
// For this reason we add an offset to get room for the Page data at the start.
// Each page starts with a header of size Page::kPageHeaderSize which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The excact encoding is described in the comments for
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
@ -150,18 +161,25 @@ class Page {
// Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
// Return the allocation watermark for the page.
// For old space pages it is guaranteed that the area under the watermark
// does not contain any garbage pointers to new space.
inline Address AllocationWatermark();
// Return the allocation watermark offset from the beginning of the page.
inline uint32_t AllocationWatermarkOffset();
inline void SetAllocationWatermark(Address allocation_watermark);
inline void SetCachedAllocationWatermark(Address allocation_watermark);
inline Address CachedAllocationWatermark();
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
// Returns the start address of the remembered set area.
Address RSetStart() { return address() + kRSetStartOffset; }
// Returns the end address of the remembered set area (exclusive).
Address RSetEnd() { return address() + kRSetEndOffset; }
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@ -193,33 +211,23 @@ class Page {
}
// ---------------------------------------------------------------------
// Remembered set support
// Card marking support
// Clears remembered set in this page.
inline void ClearRSet();
static const uint32_t kAllRegionsCleanMarks = 0x0;
static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
// Return the address of the remembered set word corresponding to an
// object address/offset pair, and the bit encoded as a single-bit
// mask in the output parameter 'bitmask'.
INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask));
inline uint32_t GetRegionMarks();
inline void SetRegionMarks(uint32_t dirty);
// Sets the corresponding remembered set bit for a given address.
INLINE(static void SetRSet(Address address, int offset));
inline uint32_t GetRegionMaskForAddress(Address addr);
inline int GetRegionNumberForAddress(Address addr);
// Clears the corresponding remembered set bit for a given address.
static inline void UnsetRSet(Address address, int offset);
inline void MarkRegionDirty(Address addr);
inline bool IsRegionDirty(Address addr);
// Checks whether the remembered set bit for a given address is set.
static inline bool IsRSetSet(Address address, int offset);
#ifdef DEBUG
// Use a state to mark whether remembered set space can be used for other
// purposes.
enum RSetState { IN_USE, NOT_IN_USE };
static bool is_rset_in_use() { return rset_state_ == IN_USE; }
static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif
inline void ClearRegionMarks(Address start,
Address end,
bool reaches_limit);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@ -227,25 +235,11 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
// The offset of the remembered set in a page, in addition to the empty bytes
// formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
#else
static const int kRSetOffset = 0;
#endif
// The end offset of the remembered set in a page
// (heaps are aligned to pointer size).
static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
kIntSize + kPointerSize;
// The start offset of the object area in a page.
// This needs to be at least (bits per uint32_t) * kBitsPerPointer,
// to align start of rset to a uint32_t address.
static const int kObjectStartOffset = 256;
// The start offset of the used part of the remembered set in a page.
static const int kRSetStartOffset = kRSetOffset +
kObjectStartOffset / kBitsPerPointer;
static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
@ -253,13 +247,63 @@ class Page {
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
static const int kDirtyFlagOffset = 2 * kPointerSize;
static const int kRegionSizeLog2 = 8;
static const int kRegionSize = 1 << kRegionSizeLog2;
static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
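// A sketch (an assumption for illustration, not part of the header) of how an
// address inside a page maps to its dirty-mark bit with these constants: the
// page is divided into kPageSize / kRegionSize == 32 regions of 256 bytes,
// one bit per region in a 32-bit marks word.
//
//   int Page::GetRegionNumberForAddress(Address addr) {
//     // Page offset of the address divided by the region size.
//     return static_cast<int>((OffsetFrom(addr) & kPageAlignmentMask) >>
//                             kRegionSizeLog2);
//   }
//
//   uint32_t Page::GetRegionMaskForAddress(Address addr) {
//     return 1 << GetRegionNumberForAddress(addr);
//   }
//
//   void Page::MarkRegionDirty(Address addr) {
//     SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(addr));
//   }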
enum PageFlag {
IS_NORMAL_PAGE = 1 << 0,
WAS_IN_USE_BEFORE_MC = 1 << 1
WAS_IN_USE_BEFORE_MC = 1 << 1,
// Page allocation watermark was bumped by preallocation during scavenge.
// The correct watermark can be retrieved by the CachedAllocationWatermark()
// method.
WATERMARK_INVALIDATED = 1 << 2
};
// To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
// scavenge we just invalidate the watermark on each old space page after
// processing it. At the beginning of the next scavenge we flip the meaning of
// the WATERMARK_INVALIDATED flag, so every page is again considered to have a
// valid watermark.
//
// The following invariant must hold for pages in old pointer and map spaces:
// if a page is in use, then it is marked as having an invalid watermark at
// the beginning and at the end of any GC.
//
// This invariant guarantees that after flipping the flag's meaning at the
// beginning of a scavenge, all pages in use will be marked as having a valid
// watermark.
static inline void FlipMeaningOfInvalidatedWatermarkFlag();
// Returns true if the page allocation watermark was not altered during
// scavenge.
inline bool IsWatermarkValid();
inline void InvalidateWatermark(bool value);
inline bool GetPageFlag(PageFlag flag);
inline void SetPageFlag(PageFlag flag, bool value);
inline void ClearPageFlags();
static const int kAllocationWatermarkOffsetShift = 3;
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
static const uint32_t kAllocationWatermarkOffsetMask =
((1 << kAllocationWatermarkOffsetBits) - 1) <<
kAllocationWatermarkOffsetShift;
static const uint32_t kFlagsMask =
((1 << kAllocationWatermarkOffsetShift) - 1);
STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
kAllocationWatermarkOffsetBits);
// This field contains the meaning of the WATERMARK_INVALIDATED flag.
// Instead of clearing this flag from all pages we just flip
// its meaning at the beginning of a scavenge.
static intptr_t watermark_invalidated_mark_;
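// A minimal sketch (an assumption about the implementation, not a definitive
// one) of the flag-flipping trick described above. A page's watermark reads
// as valid when its WATERMARK_INVALIDATED bit differs from the current global
// meaning kept in watermark_invalidated_mark_:
//
//   void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
//     watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
//   }
//
//   bool Page::IsWatermarkValid() {
//     return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
//   }
//
//   void Page::InvalidateWatermark(bool value) {
//     intptr_t mark = value
//         ? watermark_invalidated_mark_
//         : watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED;
//     flags_ = (flags_ & ~WATERMARK_INVALIDATED) | mark;
//   }
//
// Flipping the global mark at the start of a scavenge then turns every page
// still carrying the old meaning into a "watermark valid" page without
// touching the pages themselves.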
//---------------------------------------------------------------------------
// Page header description.
@ -279,26 +323,24 @@ class Page {
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
// For normal pages this word is used to store various page flags.
int flags;
// For normal pages this word is used to store page flags and
// the offset of the allocation top.
intptr_t flags_;
// The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
// used.
// This field contains dirty marks for regions covering the page. Only dirty
// regions might contain intergenerational references.
// Only 32 dirty marks are supported so for large object pages several regions
// might be mapped to a single dirty mark.
uint32_t dirty_regions_;
// The index of the page in its owner space.
int mc_page_index;
// The allocation pointer after relocating objects to this page.
Address mc_relocation_top;
// The forwarding address of the first live object in this page.
// During mark-compact collections this field contains the forwarding address
// of the first live object in this page.
// During scavenge collection this field is used to store allocation watermark
// if it is altered during scavenge.
Address mc_first_forwarded;
#ifdef DEBUG
private:
static RSetState rset_state_; // state of the remembered set
#endif
};
@ -921,8 +963,7 @@ class PagedSpace : public Space {
// Checks whether page is currently in use by this space.
bool IsUsed(Page* page);
// Clears remembered sets of pages in this space.
void ClearRSet();
void MarkAllPagesClean();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@ -936,6 +977,11 @@ class PagedSpace : public Space {
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) = 0;
void FlushTopPageWatermark() {
AllocationTopPage()->SetCachedAllocationWatermark(top());
AllocationTopPage()->InvalidateWatermark(true);
}
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
@ -990,7 +1036,8 @@ class PagedSpace : public Space {
// Writes relocation info to the top page.
void MCWriteRelocationInfoToPage() {
TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
TopPageOf(mc_forwarding_info_)->
SetAllocationWatermark(mc_forwarding_info_.top);
}
// Computes the offset of a given address in this space to the beginning
@ -1108,8 +1155,6 @@ class PagedSpace : public Space {
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
void DoPrintRSet(const char* space_name);
#endif
private:
@ -1762,8 +1807,6 @@ class OldSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
protected:
@ -1828,9 +1871,6 @@ class FixedSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
protected:
@ -1899,11 +1939,11 @@ class MapSpace : public FixedSpace {
PageIterator it(this, PageIterator::ALL_PAGES);
while (pages_left-- > 0) {
ASSERT(it.has_next());
it.next()->ClearRSet();
it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
ASSERT(it.has_next());
Page* top_page = it.next();
top_page->ClearRSet();
top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
ASSERT(top_page->is_valid());
int offset = live_maps % kMapsPerPage * Map::kSize;
@ -1994,9 +2034,8 @@ class LargeObjectChunk {
public:
// Allocates a new LargeObjectChunk that contains a large object page
// (Page::kPageSize aligned) that has at least size_in_bytes (for a large
// object and possibly extra remembered set words) bytes after the object
// area start of that page. The allocated chunk size is set in the output
// parameter chunk_size.
// object) bytes after the object area start of that page.
// The allocated chunk size is set in the output parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
Executability executable);
@ -2019,16 +2058,12 @@ class LargeObjectChunk {
// Returns the object in this chunk.
inline HeapObject* GetObject();
// Given a requested size (including any extra remembered set words),
// returns the physical size of a chunk to be allocated.
// Given a requested size returns the physical size of a chunk to be
// allocated.
static int ChunkSizeFor(int size_in_bytes);
// Given a chunk size, returns the object size it can accommodate (not
// including any extra remembered set words). Used by
// LargeObjectSpace::Available. Note that this can overestimate the size
// of object that will fit in a chunk---if the object requires extra
// remembered set words (eg, for large fixed arrays), the actual object
// size for the chunk will be smaller than reported by this function.
// Given a chunk size, returns the object size it can accommodate. Used by
// LargeObjectSpace::Available.
static int ObjectSizeFor(int chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
@ -2064,8 +2099,7 @@ class LargeObjectSpace : public Space {
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space, not including any extra
// remembered set words.
// Available bytes for objects in this space.
int Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
@ -2083,11 +2117,8 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
// Clears remembered sets.
void ClearRSet();
// Iterates objects whose remembered set bits are set.
void IterateRSet(ObjectSlotCallback func);
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@ -2114,8 +2145,6 @@ class LargeObjectSpace : public Space {
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
// Checks whether an address is in the object area in this space. It
// iterates all objects in the space. May be slow.
@ -2134,10 +2163,6 @@ class LargeObjectSpace : public Space {
int object_size,
Executability executable);
// Returns the number of extra bytes (rounded up to the nearest full word)
// required for extra_object_bytes of extra pointers (in bytes).
static inline int ExtraRSetBytesFor(int extra_object_bytes);
friend class LargeObjectIterator;
public:


@ -308,7 +308,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// (tail-call) to the code in register edx without checking arguments.
__ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ cmpq(rax, rbx);
@ -525,15 +526,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, JSObject::kMapOffset),
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
Factory::fixed_array_map());
__ movq(FieldOperand(scratch1, Array::kLengthOffset),
Immediate(initial_capacity));
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@ -587,7 +588,6 @@ static void AllocateJSArray(MacroAssembler* masm,
JSFunction::kPrototypeOrInitialMapOffset));
// Check whether an empty sized array is requested.
__ SmiToInteger64(array_size, array_size);
__ testq(array_size, array_size);
__ j(not_zero, &not_empty);
@ -605,10 +605,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(&not_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
SmiIndex index =
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
times_pointer_size,
array_size,
index.scale,
index.reg,
result,
elements_array_end,
scratch,
@ -620,43 +621,41 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array
// array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ Move(elements_array, Factory::empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ Integer32ToSmi(scratch, array_size);
__ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
// array_size: size of array
// array_size: size of array (smi)
__ lea(elements_array, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
// Initialize the fixed array. FixedArray length is not stored as a smi.
// Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array
ASSERT(kSmiTag == 0);
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
__ testq(array_size, array_size);
__ SmiTest(array_size);
__ j(not_zero, &not_empty_2);
// Length of the FixedArray is the number of pre-allocated elements even
// though the actual JSArray has length 0.
__ movq(FieldOperand(elements_array, Array::kLengthOffset),
Immediate(kPreallocatedArrayElements));
__ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
Smi::FromInt(kPreallocatedArrayElements));
__ jmp(&fill_array);
__ bind(&not_empty_2);
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@ -1039,8 +1038,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
__ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
__ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
__ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject


@ -1895,8 +1895,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
__ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
entry.Jump();
@ -1907,8 +1906,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
__ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
@ -4472,7 +4470,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ Move(FieldOperand(rcx, HeapObject::kMapOffset),
Factory::fixed_array_map());
// Set length.
__ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
// Fill contents of fixed-array with the-hole.
__ Move(rdx, Factory::the_hole_value());
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
@ -4591,15 +4590,15 @@ void DeferredSearchCache::Generate() {
// cache miss this optimization would hardly matter much.
// Check if we could add new entry to cache.
__ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
__ SmiToInteger32(r9, r9);
__ cmpq(rbx, r9);
__ SmiCompare(rbx, r9);
__ j(greater, &add_new_entry);
// Check if we could evict entry after finger.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(rdx, rdx);
__ SmiToInteger32(rbx, rbx);
__ addq(rdx, kEntrySizeImm);
Label forward;
__ cmpq(rbx, rdx);
@ -4611,9 +4610,8 @@ void DeferredSearchCache::Generate() {
__ jmp(&update_cache);
__ bind(&add_new_entry);
// r9 holds cache size as int.
__ movq(rdx, r9);
__ Integer32ToSmi(r9, r9);
// r9 holds cache size as smi.
__ SmiToInteger32(rdx, r9);
__ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
__ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
@ -7207,13 +7205,8 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
Result index = allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
key.reg(),
is_global);
@ -7249,31 +7242,21 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Factory::fixed_array_map());
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
__ SmiToInteger32(index.reg(), key.reg());
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
// Check that key is within bounds.
__ SmiCompare(key.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// The index register holds the un-smi-tagged key. It has been
// zero-extended to 64-bits, so it can be used directly as index in the
// operand below.
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
// TODO(206): Consider whether it makes sense to try some
// heuristic about which register to reuse. For example, if
// one is rax, the we can reuse that one because the value
// coming from the deferred code will be in rax.
Result value = index;
// The key register holds the smi-tagged key. Load the value and
// check that it is not the hole value.
Result value = elements;
SmiIndex index =
masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
__ movq(value.reg(),
Operand(elements.reg(),
index.reg(),
times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
FieldOperand(elements.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@ -7572,7 +7555,7 @@ void Reference::SetValue(InitState init_state) {
// Check whether it is possible to omit the write barrier. If the
// elements array is in new space or the value written is a smi we can
// safely update the elements array without updating the remembered set.
// safely update the elements array without write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@ -7597,10 +7580,10 @@ void Reference::SetValue(InitState init_state) {
// Store the value.
SmiIndex index =
masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
__ movq(Operand(tmp.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
__ movq(FieldOperand(tmp.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
@ -7680,7 +7663,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ xor_(rbx, rbx); // Set to NULL.
@ -8355,7 +8338,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
__ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ SmiToInteger32(rax, rax);
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
@ -8633,9 +8617,10 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
__ subl(mask, Immediate(1)); // Make mask.
__ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
// Divide smi tagged length by two.
__ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
__ subq(mask, Immediate(1)); // Make mask.
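// In plain C++ terms the mask computed above is (a sketch for illustration,
// not the generated stub; number_string_cache here stands for the cache
// FixedArray):
//
//   // The cache holds (number, string) pairs, so it has length / 2 entries;
//   // the entry count is a power of two, so entries - 1 works as a hash mask.
//   int mask = (number_string_cache->length() / 2) - 1;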
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@ -9155,7 +9140,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Get the parameters pointer from the stack and untag the length.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
__ SmiToInteger32(rcx, rcx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@ -9163,7 +9147,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
__ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
__ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
// Copy the fixed array slots.
Label loop;
@ -10934,7 +10919,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&allocated);
// Fill the fields of the cons string.
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
__ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);


@ -1010,7 +1010,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ push(rax); // Enumeration cache length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
@ -1020,7 +1019,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(Smi::FromInt(0)); // Map (0) - force slow check.
__ push(rax);
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.


@ -165,11 +165,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// r1 - used to hold the capacity mask of the dictionary
//
@ -245,7 +245,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
__ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@ -351,7 +351,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
@ -377,23 +377,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
// Save key in rbx in case we want it for the number dictionary
// case.
__ movq(rbx, rax);
__ SmiToInteger32(rax, rax);
// Get the elements array of the object.
__ bind(&index_int);
__ bind(&index_smi);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
__ movq(rax, Operand(rcx, rax, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ movq(rax, FieldOperand(rcx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@ -402,12 +402,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
// Check whether the elements is a pixel array.
// rax: untagged index
// rax: key
// rcx: elements array
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
__ SmiToInteger32(rax, rax);
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@ -417,13 +418,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// rax: untagged index
// rbx: key
// rax: key
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
__ SmiToInteger32(rbx, rax);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, rdx, rdi);
__ ret(0);
// Slow case: Load name and receiver from stack and jump to runtime.
@ -512,10 +513,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
__ movl(rax, rbx);
__ and_(rax, Immediate(String::kArrayIndexHashMask));
__ shrl(rax, Immediate(String::kHashShift));
__ jmp(&index_int);
// We want the smi-tagged index in rax.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
__ Integer32ToSmi(rax, rbx);
__ jmp(&index_smi);
}
@ -852,9 +854,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ SmiToInteger32(rdi, rcx);
__ cmpl(rdi, FieldOperand(rbx, Array::kLengthOffset));
__ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// rax: value
// rbx: FixedArray
// rcx: index (as a smi)
@ -903,11 +903,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rcx: index (as a smi)
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
__ SmiToInteger64(rdi, rcx);
__ cmpl(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
__ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Increment and restore smi-tag.
__ Integer64PlusConstantToSmi(rdi, rdi, 1);
// Increment index to get new length.
__ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
@ -936,16 +935,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
__ movq(Operand(rbx, index.reg, index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
__ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
rax);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
__ movq(Operand(rbx, index2.reg, index2.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
__ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx);


@ -90,58 +90,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
Label fast;
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
ASSERT(is_int32(~Page::kPageAlignmentMask));
and_(object,
Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
Register page_start = object;
and_(object, Immediate(~Page::kPageAlignmentMask));
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
subq(addr, page_start);
shr(addr, Immediate(kPointerSizeLog2));
Register pointer_offset = addr;
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
and_(addr, Immediate(Page::kPageAlignmentMask));
shrl(addr, Immediate(Page::kRegionSizeLog2));
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
j(below, &fast);
// We have a large object containing pointers. It must be a FixedArray.
// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.
// Load the array length into 'scratch'.
movl(scratch,
Operand(page_start,
Page::kObjectStartOffset + FixedArray::kLengthOffset));
Register array_length = scratch;
// Extra remembered set starts right after the large object (a FixedArray), at
// page_start + kObjectStartOffset + objectSize
// where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
- Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bind(&fast);
bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the smi_index register contains the array index into
// the elements array represented as a smi. Otherwise it can be used as a
@ -156,9 +119,8 @@ void MacroAssembler::RecordWrite(Register object,
// registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
@ -191,8 +153,8 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
bind(&okay);
}
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
// Test that the object address is not in the new space. We cannot
// update page dirty marks for new space pages.
InNewSpace(object, scratch, equal, &done);
// The offset is relative to a tagged or untagged HeapObject pointer,
@ -201,48 +163,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
// We use optimized write barrier code if the word being written to is not in
// a large object page, or is in the first "page" of a large object page.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'scratch'.
lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
shr(scratch, Immediate(kPointerSizeLog2));
// Compute the page address from the heap object pointer, leave it in
// 'object' (immediate value is sign extended).
and_(object, Immediate(~Page::kPageAlignmentMask));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bts(Operand(object, Page::kRSetOffset), scratch);
Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
index.reg,
index.scale,
FixedArray::kHeaderSize));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
RecordWriteHelper(object, dst, scratch);
} else {
RecordWriteStub stub(object, dst, scratch);
CallStub(&stub);
}
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
index.reg,
index.scale,
FixedArray::kHeaderSize));
}
RecordWriteHelper(object, dst, scratch);
bind(&done);
@ -644,6 +577,18 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
}
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power) {
ASSERT((0 <= power) && (power < 32));
if (dst.is(src)) {
shr(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
}
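// Why one logical shift suffices (a sketch of the reasoning, assuming the x64
// smi layout where the 32-bit integer value lives in the upper half of the
// word, i.e. kSmiShift == 32): a positive smi encoding the value v is
// v << kSmiShift, so
//
//   uint32_t PositiveSmiDivPowerOfTwo(uint64_t smi, int power) {
//     // (v << kSmiShift) >> (power + kSmiShift) == v >> power
//     return static_cast<uint32_t>(smi >> (power + kSmiShift));
//   }
//
// untags the smi and divides by 2^power at the same time. For example, the
// smi encoding of 8 shifted right by (1 + kSmiShift) yields 4.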
Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
@ -2607,7 +2552,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
movl(FieldOperand(result, String::kHashFieldOffset),
movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@ -2645,7 +2590,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
movl(FieldOperand(result, String::kHashFieldOffset),
movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}


@ -78,8 +78,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
// Set the remebered set bit for an address which points into an
// object. RecordWriteHelper only works if the object is not in new
// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@ -93,7 +93,7 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* branch);
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@ -103,7 +103,7 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);
// Set the remembered set bit for [object+offset].
// For page containing |object| mark region covering [object+offset] dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@ -220,6 +220,13 @@ class MacroAssembler: public Assembler {
Register src,
int power);
// Divide a positive smi's integer value by a power of two.
// Provides the result as a 32-bit integer value.
void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power);
// Simple comparison of smis.
void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);


@ -1115,7 +1115,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@ -1123,8 +1123,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ SmiAddConstant(rax, rax, Smi::FromInt(argc));
// Get the element's length into rcx.
__ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ SmiCompare(rax, rcx);
@ -1143,12 +1142,12 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(Operand(rdx, 0), rcx);
// Check if value is a smi.
__ JumpIfNotSmi(rcx, &with_rset_update);
__ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_rset_update);
__ bind(&with_write_barrier);
__ InNewSpace(rbx, rcx, equal, &exit);
@ -1196,11 +1195,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
__ addl(FieldOperand(rbx, FixedArray::kLengthOffset),
Immediate(kAllocationDelta));
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Elements are in new space, so no remembered set updates are necessary.
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);


@ -177,7 +177,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request)));
CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
@ -666,14 +666,14 @@ TEST(JSArray) {
array->SetElementsLength(*length);
uint32_t int_length = 0;
CHECK(Array::IndexFromObject(*length, &int_length));
CHECK(length->ToArrayIndex(&int_length));
CHECK_EQ(*length, array->length());
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
array->SetElement(int_length, *name);
uint32_t new_int_length = 0;
CHECK(Array::IndexFromObject(array->length(), &new_int_length));
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
CHECK_EQ(array->GetElement(int_length), *name);
CHECK_EQ(array->GetElement(0), *name);
@ -830,7 +830,7 @@ TEST(LargeObjectSpaceContains) {
}
CHECK(bytes_to_page > FixedArray::kHeaderSize);
int* flags_ptr = &Page::FromAddress(next_page)->flags;
intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
Address flags_addr = reinterpret_cast<Address>(flags_ptr);
int bytes_to_allocate =
@ -888,7 +888,7 @@ TEST(Regression39128) {
// The plan: create JSObject which references objects in new space.
// Then clone this object (forcing it to go into old space) and check
// that only bits pertaining to the object are updated in remembered set.
// that region dirty marks are updated correctly.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(Top::global_context()->object_function());
@ -931,7 +931,7 @@ TEST(Regression39128) {
CHECK(!object->IsFailure());
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, jsobject->elements()->length());
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
CHECK_EQ(0, jsobject->properties()->length());
// Create a reference to object in new space in jsobject.
jsobject->FastPropertyAtPut(-1, array);
@ -951,17 +951,9 @@ TEST(Regression39128) {
}
CHECK(Heap::old_pointer_space()->Contains(clone->address()));
// Step 5: verify validity of remembered set.
// Step 5: verify validity of region dirty marks.
Address clone_addr = clone->address();
Page* page = Page::FromAddress(clone_addr);
// Check that remembered set tracks a reference from inobject property 1.
CHECK(page->IsRSetSet(clone_addr, object_size - kPointerSize));
// Probe several addresses after the object.
for (int i = 0; i < 7; i++) {
int offset = object_size + i * kPointerSize;
if (clone_addr + offset >= page->ObjectAreaEnd()) {
break;
}
CHECK(!page->IsRSetSet(clone_addr, offset));
}
// Check that region covering inobject property 1 is marked dirty.
CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
}


@ -32,40 +32,32 @@
using namespace v8::internal;
static void VerifyRSet(Address page_start) {
#ifdef DEBUG
Page::set_rset_state(Page::IN_USE);
#endif
static void VerifyRegionMarking(Address page_start) {
Page* p = Page::FromAddress(page_start);
p->ClearRSet();
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
CHECK(!Page::IsRSetSet(addr, 0));
CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
}
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
Page::SetRSet(addr, 0);
Page::FromAddress(addr)->MarkRegionDirty(addr);
}
for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
CHECK(Page::IsRSetSet(addr, 0));
CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
TEST(Page) {
#ifdef DEBUG
Page::set_rset_state(Page::NOT_IN_USE);
#endif
byte* mem = NewArray<byte>(2*Page::kPageSize);
CHECK(mem != NULL);
@ -90,8 +82,8 @@ TEST(Page) {
CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());
// test remember set
VerifyRSet(page_start);
// test region marking
VerifyRegionMarking(page_start);
DeleteArray(mem);
}