Reverting r4685, r4686, r4687
TBR=ager@chromium.org
Review URL: http://codereview.chromium.org/2071020

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4688 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 52cef3f0e1
commit 955828e437
@@ -138,7 +138,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));

// Initialize the FixedArray and fill it with holes. FixedArray length is
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)

@@ -146,7 +146,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
__ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
__ mov(scratch3, Operand(initial_capacity));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));

@@ -243,23 +243,23 @@ static void AllocateJSArray(MacroAssembler* masm,
__ and_(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
// Initialize the fixed array and fill it with holes. FixedArray length is
// Initialize the fixed array and fill it with holes. FixedArray length is not
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
ASSERT(kSmiTag == 0);
// Convert array_size from smi to value.
__ mov(array_size,
Operand(array_size, ASR, kSmiTagSize));
__ tst(array_size, array_size);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
// JSArrays. The length of a FixedArray is stored as a smi.
__ mov(array_size,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
LeaveCC,
eq);
// JSArrays. The length of a FixedArray is not stored as a smi.
__ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));

@@ -267,11 +267,10 @@ static void AllocateJSArray(MacroAssembler* masm,
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
// array_size: smi-tagged size of elements array
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
// array_size: size of elements array
__ add(elements_array_end,
elements_array_storage,
Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
Operand(array_size, LSL, kPointerSizeLog2));

// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@@ -544,7 +543,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,

// Load the initial map and verify that it is in fact a map.
// r1: constructor function
// r7: undefined value
// r7: undefined
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);

@@ -556,14 +555,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
// r7: undefined value
// r7: undefined
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);

// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
// r7: undefined value
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);

@@ -573,7 +572,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r3: object size
// r4: JSObject (not tagged)
// r7: undefined value
// r7: undefined
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);

@@ -589,7 +588,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
// r7: undefined value
// r7: undefined
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry;

@@ -612,7 +611,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
// r7: undefined value
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.

@@ -634,7 +633,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
// r7: undefined value
// r7: undefined
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(
r0,

@@ -649,14 +648,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
// r7: undefined value
// r7: undefined
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
__ str(r3, MemOperand(r2, kPointerSize, PostIndex));

// Initialize the fields to undefined.
// r1: constructor function

@@ -1049,7 +1047,6 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.
@@ -2276,6 +2276,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(r0); // map
frame_->EmitPush(r2); // enum cache bridge cache
__ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);

@@ -2288,6 +2289,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {

// Push the length of the array and the initial index onto the stack.
__ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0))); // init index
frame_->EmitPush(r0);

@@ -4488,8 +4490,7 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(r2, Operand(Factory::fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
__ mov(r2, Operand(Factory::the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));

@@ -5696,7 +5697,7 @@ void CodeGenerator::EmitKeyedLoad() {
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
__ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
__ cmp(scratch2, key);
__ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
deferred->Branch(ls); // Unsigned less equal.

// Load and check that the result is not the hole (key is a smi).

@@ -5997,8 +5998,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r2, Operand(length));
__ str(r2, FieldMemOperand(r0, Array::kLengthOffset));

// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));

@@ -6629,8 +6630,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
// Divide length by two (length is a smi).
__ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
// Divide length by two (length is not a smi).
__ mov(mask, Operand(mask, ASR, 1));
__ sub(mask, mask, Operand(1)); // Make mask.

// Calculate the entry in the number string cache. The hash value in the

@@ -8521,8 +8522,9 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ cmp(r1, Operand(0));
__ b(eq, &done);

// Get the parameters pointer from the stack.
// Get the parameters pointer from the stack and untag the length.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
__ mov(r1, Operand(r1, LSR, kSmiTagSize));

// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.

@@ -8531,7 +8533,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.

// Copy the fixed array slots.
Label loop;

@@ -8682,7 +8683,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
__ cmp(r2, Operand(r0, ASR, kSmiTagSize));
__ cmp(r2, r0);
__ b(gt, &runtime);

// subject: Subject string
@@ -163,11 +163,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// t1 - used to hold the capacity mask of the dictionary
//

@@ -235,7 +235,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ ldr(key, FieldMemOperand(t2, kValueOffset));
__ ldr(t0, FieldMemOperand(t2, kValueOffset));
}

@@ -743,6 +743,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
// Untag key into r2..
__ mov(r2, Operand(key, ASR, kSmiTagSize));

// Get the elements array of the object.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).

@@ -751,14 +754,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ cmp(key, Operand(r3));
__ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
__ cmp(r2, r3);
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty

@@ -769,6 +770,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

// Check whether the elements is a pixel array.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ bind(&check_pixel_array);

@@ -776,7 +778,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
__ mov(r2, Operand(key, ASR, kSmiTagSize));
__ cmp(r2, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));

@@ -787,13 +788,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: key
// r2: untagged index
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
__ mov(r0, r2);
__ Ret();

// Slow case, key and receiver still in r0 and r1.
@@ -1281,9 +1283,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &check_pixel_array);
// Check array bounds. Both the key and the length of FixedArray are smis.
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r4, Operand(key, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ cmp(r4, Operand(ip));
__ b(lo, &fast);

// Slow case, handle jump to runtime.

@@ -1329,9 +1333,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Condition code from comparing key and array length is still available.
__ b(ne, &slow); // Only support writing to writing to array[array.length].
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
// Calculate key + 1 as smi.
ASSERT_EQ(0, kSmiTag);
@@ -252,21 +252,63 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}

mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
const int kRSetWordShift = 3;

// Calculate region number.
add(offset, object, Operand(offset)); // Add offset into the object.
and_(offset, offset, Operand(ip)); // Offset into page of the object.
mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
Label fast;

// Calculate page address.
// Compute the bit offset in the remembered set.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
and_(scratch, object, Operand(ip)); // offset into page of the object
add(offset, scratch, Operand(offset)); // add offset into the object
mov(offset, Operand(offset, LSR, kObjectAlignmentBits));

// Compute the page address from the heap object pointer.
// object: heap object pointer (with tag)
// offset: bit offset of store position in the remembered set
bic(object, object, Operand(ip));

// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
// object: page start
// offset: bit offset of store position in the remembered set
cmp(offset, Operand(Page::kPageSize / kPointerSize));
b(lt, &fast);

// Adjust the bit offset to be relative to the start of the extra
// remembered set and the start address to be the address of the extra
// remembered set.
sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
// Load the array length into 'scratch' and multiply by four to get the
// size in bytes of the elements.
ldr(scratch, MemOperand(object, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
// Add the page header (including remembered set), array header, and array
// body size to the page address.
add(object, object, Operand(Page::kObjectStartOffset
+ FixedArray::kHeaderSize));
add(object, object, Operand(scratch));

bind(&fast);
// Get address of the rset word.
// object: start of the remembered set (page start for the fast case)
// offset: bit offset of store position in the remembered set
bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
add(object, object, Operand(scratch, LSR, kRSetWordShift));
// Get bit offset in the rset word.
// object: address of remembered set word
// offset: bit offset of store position
and_(offset, offset, Operand(kBitsPerInt - 1));

ldr(scratch, MemOperand(object));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
str(scratch, MemOperand(object));
}
@@ -294,7 +336,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label done;

// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
// remembered set bits in the new space.
InNewSpace(object, scratch, eq, &done);

// Record the actual write.

@@ -577,7 +619,6 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -114,14 +114,16 @@ class MacroAssembler: public Assembler {
Label* branch);

// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
// Set the remebered set bit for an offset into an
// object. RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object, Register offset, Register scracth);

// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
// The 'scratch' register is used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
// implementation and all 3 registers are clobbered by the operation, as
// well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);

// Push two registers. Pushes leftmost register first (to highest address).
@@ -305,7 +305,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// region dirty marks.
// remembered sets.
ASSERT(Heap::new_space()->Contains(elms));

STATIC_ASSERT(FixedArray::kMapOffset == 0);

@@ -322,7 +322,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);

former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);

ASSERT_EQ(elms->address() + to_trim * kPointerSize,
(elms + to_trim * kPointerSize)->address());

@@ -500,7 +500,7 @@ BUILTIN(ArrayShift) {

if (Heap::new_space()->Contains(elms)) {
// As elms still in the same space they used to be (new space),
// there is no need to update region dirty mark.
// there is no need to update remembered set.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
@@ -121,7 +121,7 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
: (shared->is_toplevel() || shared->try_full_codegen());

bool force_full_compiler = false;
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#ifdef V8_TARGET_ARCH_IA32
// On ia32 the full compiler can compile all code whereas the other platforms
// the constructs supported is checked by the associated syntax checker. When
// --always-full-compiler is used on ia32 the syntax checker is still in
@@ -149,7 +149,7 @@ DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#ifdef V8_TARGET_ARCH_IA32
DEFINE_bool(force_full_compiler, false,
"force use of the dedicated run-once backend for all code")
#endif

@@ -337,6 +337,7 @@ DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
DEFINE_bool(print_rset, false, "print remembered sets before GC")

// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
@@ -303,6 +303,7 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class Array;
class JSArray;
class JSFunction;
class JSObject;

@@ -543,16 +544,16 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)

// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
#define MAP_POINTER_ALIGN(value) \
// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)

// The expression OFFSET_OF(type, field) computes the byte-offset
@@ -184,7 +184,7 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::FromAddress(address)->MarkRegionDirty(address + offset);
Page::SetRSet(address, offset);
}

@@ -195,7 +195,7 @@ void Heap::RecordWrites(Address address, int start, int len) {
offset < start + len * kPointerSize;
offset += kPointerSize) {
SLOW_ASSERT(Contains(address + offset));
Page::FromAddress(address)->MarkRegionDirty(address + offset);
Page::SetRSet(address, offset);
}
}
@@ -234,40 +234,13 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
CopyWords(dst, src, byte_size / kPointerSize);
}

void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));

Page* page = Page::FromAddress(dst);
uint32_t marks = page->GetRegionMarks();

for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);

if (Heap::InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}

dst += kPointerSize;
src += kPointerSize;
}

page->SetRegionMarks(marks);
}

void Heap::MoveBlock(Address dst, Address src, int byte_size) {
void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));

int size_in_words = byte_size / kPointerSize;

@@ -277,12 +250,10 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
((OffsetFrom(reinterpret_cast<Address>(src)) -
OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));

Object** src_slot = reinterpret_cast<Object**>(src);
Object** dst_slot = reinterpret_cast<Object**>(dst);
Object** end_slot = src_slot + size_in_words;
Object** end = src + size_in_words;

while (src_slot != end_slot) {
*dst_slot++ = *src_slot++;
while (src != end) {
*dst++ = *src++;
}
} else {
memmove(dst, src, byte_size);

@@ -290,17 +261,6 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}

void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
ASSERT((dst >= (src + byte_size)) ||
((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));

CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}

void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(InFromSpace(object));
src/heap.cc (583 changed lines)
@@ -326,6 +326,13 @@ void Heap::GarbageCollectionPrologue() {
}

if (FLAG_gc_verbose) Print();

if (FLAG_print_rset) {
// Not all spaces have remembered set bits that we care about.
old_pointer_space_->PrintRSet();
map_space_->PrintRSet();
lo_space_->PrintRSet();
}
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

@@ -512,8 +519,9 @@ void Heap::ReserveSpace(
Heap::CollectGarbage(cell_space_size, CELL_SPACE);
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
// We add a slack-factor of 2 in order to have space for the remembered
// set and a series of large-object allocations that are only just larger
// than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
@@ -564,25 +572,6 @@ void Heap::ClearJSFunctionResultCaches() {
}

#ifdef DEBUG

enum PageWatermarkValidity {
ALL_VALID,
ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
PageWatermarkValidity validity) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
bool expected_value = (validity == ALL_VALID);
while (it.has_next()) {
Page* page = it.next();
ASSERT(page->IsWatermarkValid() == expected_value);
}
}
#endif

void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
@@ -827,20 +816,6 @@ void Heap::Scavenge() {

gc_state_ = SCAVENGE;

Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

// We do not update an allocation watermark of the top page during linear
// allocation to avoid overhead. So to maintain the watermark invariant
// we have to manually cache the watermark and mark the top page as having an
// invalid watermark. This guarantees that dirty regions iteration will use a
// correct watermark even if a linear allocation happens.
old_pointer_space_->FlushTopPageWatermark();
map_space_->FlushTopPageWatermark();

// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));

@@ -883,17 +858,9 @@ void Heap::Scavenge() {

// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateDirtyRegions(old_pointer_space_,
&IteratePointersInDirtyRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);

IterateDirtyRegions(map_space_,
&IteratePointersInDirtyMapsRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);

lo_space_->IterateDirtyRegions(&ScavengePointer);
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);

// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@@ -996,8 +963,9 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Copy the from-space object to its new location (given by the
// forwarding address) and fix its map.
HeapObject* target = source->map_word().ToForwardingAddress();
int size = source->SizeFromMap(map);
CopyBlock(target->address(), source->address(), size);
CopyBlock(reinterpret_cast<Object**>(target->address()),
reinterpret_cast<Object**>(source->address()),
source->SizeFromMap(map));
target->set_map(map);

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

@@ -1005,10 +973,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
ASSERT(!target->IsMap());
IterateAndMarkPointersToNewSpace(target->address(),
target->address() + size,
&ScavengePointer);
target->Iterate(scavenge_visitor);
UpdateRSet(target);
}

// Take another spin if there are now unswept objects in new space
@@ -1019,6 +985,117 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}

void Heap::ClearRSetRange(Address start, int size_in_bytes) {
uint32_t start_bit;
Address start_word_address =
Page::ComputeRSetBitPosition(start, 0, &start_bit);
uint32_t end_bit;
Address end_word_address =
Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
0,
&end_bit);

// We want to clear the bits in the starting word starting with the
// first bit, and in the ending word up to and including the last
// bit. Build a pair of bitmasks to do that.
uint32_t start_bitmask = start_bit - 1;
uint32_t end_bitmask = ~((end_bit << 1) - 1);

// If the start address and end address are the same, we mask that
// word once, otherwise mask the starting and ending word
// separately and all the ones in between.
if (start_word_address == end_word_address) {
Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
} else {
Memory::uint32_at(start_word_address) &= start_bitmask;
Memory::uint32_at(end_word_address) &= end_bitmask;
start_word_address += kIntSize;
memset(start_word_address, 0, end_word_address - start_word_address);
}
}

class UpdateRSetVisitor: public ObjectVisitor {
public:

void VisitPointer(Object** p) {
UpdateRSet(p);
}

void VisitPointers(Object** start, Object** end) {
// Update a store into slots [start, end), used (a) to update remembered
// set when promoting a young object to old space or (b) to rebuild
// remembered sets after a mark-compact collection.
for (Object** p = start; p < end; p++) UpdateRSet(p);
}
private:

void UpdateRSet(Object** p) {
// The remembered set should not be set. It should be clear for objects
// newly copied to old space, and it is cleared before rebuilding in the
// mark-compact collector.
ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
if (Heap::InNewSpace(*p)) {
Page::SetRSet(reinterpret_cast<Address>(p), 0);
}
}
};
int Heap::UpdateRSet(HeapObject* obj) {
ASSERT(!InNewSpace(obj));
// Special handling of fixed arrays to iterate the body based on the start
// address and offset. Just iterating the pointers as in UpdateRSetVisitor
// will not work because Page::SetRSet needs to have the start of the
// object for large object pages.
if (obj->IsFixedArray()) {
FixedArray* array = FixedArray::cast(obj);
int length = array->length();
for (int i = 0; i < length; i++) {
int offset = FixedArray::kHeaderSize + i * kPointerSize;
ASSERT(!Page::IsRSetSet(obj->address(), offset));
if (Heap::InNewSpace(array->get(i))) {
Page::SetRSet(obj->address(), offset);
}
}
} else if (!obj->IsCode()) {
// Skip code object, we know it does not contain inter-generational
// pointers.
UpdateRSetVisitor v;
obj->Iterate(&v);
}
return obj->Size();
}

void Heap::RebuildRSets() {
// By definition, we do not care about remembered set bits in code,
// data, or cell spaces.
map_space_->ClearRSet();
RebuildRSets(map_space_);

old_pointer_space_->ClearRSet();
RebuildRSets(old_pointer_space_);

Heap::lo_space_->ClearRSet();
RebuildRSets(lo_space_);
}

void Heap::RebuildRSets(PagedSpace* space) {
HeapObjectIterator it(space);
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Heap::UpdateRSet(obj);
}

void Heap::RebuildRSets(LargeObjectSpace* space) {
LargeObjectIterator it(space);
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Heap::UpdateRSet(obj);
}

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
@@ -1044,7 +1121,9 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
HeapObject* target,
int size) {
// Copy the content of source to target.
CopyBlock(target->address(), source->address(), size);
CopyBlock(reinterpret_cast<Object**>(target->address()),
reinterpret_cast<Object**>(source->address()),
size);

// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));

@@ -1603,7 +1682,7 @@ bool Heap::CreateInitialObjects() {
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
if (obj->IsFailure()) return false;
hidden_symbol_ = String::cast(obj);

@@ -1839,9 +1918,6 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
share->set_num_literals(0);
share->set_end_position(0);
share->set_function_token_position(0);
return result;
}
@@ -2103,8 +2179,8 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
: lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;

reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
reinterpret_cast<Array*>(result)->set_length(length);
return result;
}

@@ -2119,8 +2195,8 @@ Object* Heap::AllocateByteArray(int length) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;

reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
reinterpret_cast<Array*>(result)->set_length(length);
return result;
}

@@ -2236,7 +2312,9 @@ Object* Heap::CopyCode(Code* code) {
// Copy code object.
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
CopyBlock(new_addr, old_addr, obj_size);
CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));

@@ -2382,8 +2460,8 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Copy the content. The arguments boilerplate doesn't have any
// fields that point to new space so it's safe to skip the write
// barrier here.
CopyBlock(HeapObject::cast(result)->address(),
boilerplate->address(),
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
kArgumentsObjectSize);

// Set the two properties.
@@ -2605,8 +2683,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
Address clone_address = HeapObject::cast(clone)->address();
CopyBlock(clone_address,
source->address(),
CopyBlock(reinterpret_cast<Object**>(clone_address),
reinterpret_cast<Object**>(source->address()),
object_size);
// Update write barrier for all fields that lie beyond the header.
RecordWrites(clone_address,

@@ -2618,8 +2696,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
ASSERT(Heap::InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
reinterpret_cast<Object**>(source->address()),
object_size);
}

@@ -2890,8 +2968,8 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
reinterpret_cast<Array*>(result)->set_length(0);
return result;
}

@@ -2916,7 +2994,9 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
if (obj->IsFailure()) return obj;
if (Heap::InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
CopyBlock(reinterpret_cast<Object**>(dst->address()),
reinterpret_cast<Object**>(src->address()),
FixedArray::SizeFor(len));
return obj;
}
HeapObject::cast(obj)->set_map(src->map());

@@ -2937,8 +3017,8 @@ Object* Heap::AllocateFixedArray(int length) {
Object* result = AllocateRawFixedArray(length);
if (!result->IsFailure()) {
// Initialize header.
FixedArray* array = reinterpret_cast<FixedArray*>(result);
array->set_map(fixed_array_map());
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(undefined_value()));
@@ -2965,10 +3045,27 @@ Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
space = LO_SPACE;
}

AllocationSpace retry_space =
(size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;

return AllocateRaw(size, space, retry_space);
// Specialize allocation for the space.
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
// We cannot use Heap::AllocateRaw() because it will not properly
// allocate extra remembered set bits if always_allocate() is true and
// new space allocation fails.
result = new_space_.AllocateRaw(size);
if (result->IsFailure() && always_allocate()) {
if (size <= MaxObjectSizeInPagedSpace()) {
result = old_pointer_space_->AllocateRaw(size);
} else {
result = lo_space_->AllocateRawFixedArray(size);
}
}
} else if (space == OLD_POINTER_SPACE) {
result = old_pointer_space_->AllocateRaw(size);
} else {
ASSERT(space == LO_SPACE);
result = lo_space_->AllocateRawFixedArray(size);
}
return result;
}
@@ -3016,7 +3113,7 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
return result;
}

@@ -3268,30 +3365,6 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {

#ifdef DEBUG

static bool VerifyPointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback copy_object_func
) {
Address slot_address = start;

while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT(Heap::InToSpace(*slot));
}
slot_address += kPointerSize;
}

return true;
}

static void DummyScavengePointer(HeapObject** p) {
}

void Heap::Verify() {
ASSERT(HasBeenSetup());
@@ -3300,28 +3373,14 @@ void Heap::Verify() {

new_space_.Verify();

VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
old_pointer_space_->Verify(&dirty_regions_visitor);
map_space_->Verify(&dirty_regions_visitor);
VerifyPointersAndRSetVisitor rset_visitor;
old_pointer_space_->Verify(&rset_visitor);
map_space_->Verify(&rset_visitor);

IterateDirtyRegions(old_pointer_space_,
&VerifyPointersInDirtyRegion,
&DummyScavengePointer,
WATERMARK_SHOULD_BE_VALID);

IterateDirtyRegions(map_space_,
&VerifyPointersInDirtyRegion,
&DummyScavengePointer,
WATERMARK_SHOULD_BE_VALID);

VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
VerifyPageWatermarkValidity(map_space_, ALL_INVALID);

VerifyPointersVisitor no_dirty_regions_visitor;
old_data_space_->Verify(&no_dirty_regions_visitor);
code_space_->Verify(&no_dirty_regions_visitor);
cell_space_->Verify(&no_dirty_regions_visitor);
VerifyPointersVisitor no_rset_visitor;
old_data_space_->Verify(&no_rset_visitor);
code_space_->Verify(&no_rset_visitor);
cell_space_->Verify(&no_rset_visitor);

lo_space_->Verify();
}
@@ -3374,253 +3433,65 @@ void Heap::ZapFromSpace() {
#endif // DEBUG

bool Heap::IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address slot_address = start;
bool pointers_to_new_space_found = false;
int Heap::IterateRSetRange(Address object_start,
Address object_end,
Address rset_start,
ObjectSlotCallback copy_object_func) {
Address object_address = object_start;
Address rset_address = rset_start;
int set_bits_count = 0;

while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
pointers_to_new_space_found = true;
// Loop over all the pointers in [object_start, object_end).
while (object_address < object_end) {
uint32_t rset_word = Memory::uint32_at(rset_address);
if (rset_word != 0) {
uint32_t result_rset = rset_word;
for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
// Do not dereference pointers at or past object_end.
if ((rset_word & bitmask) != 0 && object_address < object_end) {
Object** object_p = reinterpret_cast<Object**>(object_address);
if (Heap::InNewSpace(*object_p)) {
copy_object_func(reinterpret_cast<HeapObject**>(object_p));
}
// If this pointer does not need to be remembered anymore, clear
// the remembered set bit.
if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
set_bits_count++;
}
object_address += kPointerSize;
}
}
slot_address += kPointerSize;
}
return pointers_to_new_space_found;
}
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
Address page = Page::FromAddress(addr)->ObjectAreaStart();
return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}

// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
return page + ((addr - page) / Map::kSize * Map::kSize);
}

static bool IteratePointersInDirtyMaps(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
ASSERT(MapStartAlign(start) == start);
ASSERT(MapEndAlign(end) == end);

Address map_address = start;
bool pointers_to_new_space_found = false;

while (map_address < end) {
ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
ASSERT(Memory::Object_at(map_address)->IsMap());

Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)) {
pointers_to_new_space_found = true;
}

map_address += Map::kSize;
}

return pointers_to_new_space_found;
}

bool Heap::IteratePointersInDirtyMapsRegion(
Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address map_aligned_start = MapStartAlign(start);
Address map_aligned_end = MapEndAlign(end);

bool contains_pointers_to_new_space = false;

if (map_aligned_start != start) {
Address prev_map = map_aligned_start - Map::kSize;
ASSERT(Memory::Object_at(prev_map)->IsMap());

Address pointer_fields_start =
Max(start, prev_map + Map::kPointerFieldsBeginOffset);

Address pointer_fields_end =
Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);

contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
}

contains_pointers_to_new_space =
IteratePointersInDirtyMaps(map_aligned_start,
map_aligned_end,
copy_object_func)
|| contains_pointers_to_new_space;

if (map_aligned_end != end) {
ASSERT(Memory::Object_at(map_aligned_end)->IsMap());

Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;

Address pointer_fields_end =
Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);

contains_pointers_to_new_space =
IteratePointersInDirtyRegion(pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
}

return contains_pointers_to_new_space;
}
void Heap::IterateAndMarkPointersToNewSpace(Address start,
Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
Page* page = Page::FromAddress(start);

uint32_t marks = page->GetRegionMarks();

while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
marks |= page->GetRegionMaskForAddress(slot_address);
// Update the remembered set if it has changed.
if (result_rset != rset_word) {
Memory::uint32_at(rset_address) = result_rset;
}
} else {
// No bits in the word were set. This is the common case.
object_address += kPointerSize * kBitsPerInt;
}
slot_address += kPointerSize;
rset_address += kIntSize;
}

page->SetRegionMarks(marks);
return set_bits_count;
}
uint32_t Heap::IterateDirtyRegions(
uint32_t marks,
Address area_start,
Address area_end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func) {
uint32_t newmarks = 0;
uint32_t mask = 1;
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
ASSERT(space == old_pointer_space_ || space == map_space_);

if (area_start >= area_end) {
return newmarks;
}

Address region_start = area_start;

// area_start does not necessarily coincide with start of the first region.
// Thus to calculate the beginning of the next region we have to align
// area_start by Page::kRegionSize.
Address second_region =
reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
~Page::kRegionAlignmentMask);

// Next region might be beyond area_end.
Address region_end = Min(second_region, area_end);

if (marks & mask) {
if (visit_dirty_region(region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
mask <<= 1;

// Iterate subsequent regions which fully lay inside [area_start, area_end[.
region_start = region_end;
region_end = region_start + Page::kRegionSize;

while (region_end <= area_end) {
if (marks & mask) {
if (visit_dirty_region(region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}

region_start = region_end;
region_end = region_start + Page::kRegionSize;

mask <<= 1;
}

if (region_start != area_end) {
// A small piece of area left uniterated because area_end does not coincide
// with region end. Check whether region covering last part of area is
// dirty.
if (marks & mask) {
if (visit_dirty_region(region_start, area_end, copy_object_func)) {
newmarks |= mask;
}
}
}

return newmarks;
}
void Heap::IterateDirtyRegions(
|
||||
PagedSpace* space,
|
||||
DirtyRegionCallback visit_dirty_region,
|
||||
ObjectSlotCallback copy_object_func,
|
||||
ExpectedPageWatermarkState expected_page_watermark_state) {
|
||||
static void* paged_rset_histogram = StatsTable::CreateHistogram(
|
||||
"V8.RSetPaged",
|
||||
0,
|
||||
Page::kObjectAreaSize / kPointerSize,
|
||||
30);
|
||||
|
||||
PageIterator it(space, PageIterator::PAGES_IN_USE);
|
||||
|
||||
while (it.has_next()) {
|
||||
Page* page = it.next();
|
||||
uint32_t marks = page->GetRegionMarks();
|
||||
|
||||
if (marks != Page::kAllRegionsCleanMarks) {
|
||||
Address start = page->ObjectAreaStart();
|
||||
|
||||
// Do not try to visit pointers beyond page allocation watermark.
|
||||
// Page can contain garbage pointers there.
|
||||
Address end;
|
||||
|
||||
if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
|
||||
page->IsWatermarkValid()) {
|
||||
end = page->AllocationWatermark();
|
||||
} else {
|
||||
end = page->CachedAllocationWatermark();
|
||||
}
|
||||
|
||||
ASSERT(space == old_pointer_space_ ||
|
||||
(space == map_space_ &&
|
||||
((page->ObjectAreaStart() - end) % Map::kSize == 0)));
|
||||
|
||||
page->SetRegionMarks(IterateDirtyRegions(marks,
|
||||
start,
|
||||
end,
|
||||
visit_dirty_region,
|
||||
copy_object_func));
|
||||
int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
|
||||
page->RSetStart(), copy_object_func);
|
||||
if (paged_rset_histogram != NULL) {
|
||||
StatsTable::AddHistogramSample(paged_rset_histogram, count);
|
||||
}
|
||||
|
||||
// Mark page watermark as invalid to maintain watermark validity invariant.
|
||||
// See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
|
||||
page->InvalidateWatermark(true);
|
||||
}
|
||||
}
|
||||
|
||||
|
113 src/heap.h
@ -206,10 +206,6 @@ class HeapStats;

typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);

typedef bool (*DirtyRegionCallback)(Address start,
Address end,
ObjectSlotCallback copy_object_func);


// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@ -744,54 +740,17 @@ class Heap : public AllStatic {
// Iterates over all the other roots in the heap.
static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
WATERMARK_CAN_BE_INVALID
};

// For each dirty region on a page in use from an old space call
// visit_dirty_region callback.
// If either visit_dirty_region or callback can cause an allocation
// in old space and changes in allocation watermark then
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
static void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
ExpectedPageWatermarkState expected_page_watermark_state);

// Interpret marks as a bitvector of dirty marks for regions of size
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
static uint32_t IterateDirtyRegions(uint32_t marks,
Address start,
Address end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);

// Iterate pointers to new space found in memory interval from start to end.
// Update dirty marks for page containing start address.
static void IterateAndMarkPointersToNewSpace(Address start,
Address end,
ObjectSlotCallback callback);

// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space was found.
static bool IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback callback);


// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space was found.
static bool IteratePointersInDirtyMapsRegion(Address start,
Address end,
ObjectSlotCallback callback);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);

// Iterates a range of remembered set addresses starting with rset_start
// corresponding to the range of allocated pointers
// [object_start, object_end).
// Returns the number of bits that were set.
static int IterateRSetRange(Address object_start,
Address object_end,
Address rset_start,
ObjectSlotCallback copy_object_func);

// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
@ -893,6 +852,17 @@ class Heap : public AllStatic {
static void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);

// Clear a range of remembered set addresses corresponding to the object
// area address 'start' with size 'size_in_bytes', eg, when adding blocks
// to the free list.
static void ClearRSetRange(Address start, int size_in_bytes);

// Rebuild remembered set in old and map spaces.
static void RebuildRSets();

// Update an old object's remembered set
static int UpdateRSet(HeapObject* obj);

// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();

@ -985,19 +955,11 @@ class Heap : public AllStatic {

// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);

static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
static inline void CopyBlock(Object** dst, Object** src, int byte_size);

// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);

static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
static inline void MoveBlock(Object** dst, Object** src, int byte_size);

// Check new space expansion criteria and expand semispaces if it was hit.
static void CheckNewSpaceExpansionCriteria();
@ -1245,6 +1207,12 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif

// Rebuild remembered set in an old space.
static void RebuildRSets(PagedSpace* space);

// Rebuild remembered set in the large object space.
static void RebuildRSets(LargeObjectSpace* space);

// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);

@ -1333,11 +1301,11 @@ class LinearAllocationScope {


#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
// Visitor class to verify interior pointers that do not have remembered set
// bits. All heap object pointers have to point into the heap to a location
// that has a map pointer at its first word. Caveat: Heap::Contains is an
// approximation because it can return true for objects in a heap space but
// above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
@ -1352,11 +1320,10 @@ class VerifyPointersVisitor: public ObjectVisitor {
};


// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
// Visitor class to verify interior pointers that have remembered set bits.
// As VerifyPointersVisitor but also checks that remembered set bits are
// always set for pointers into new space.
class VerifyPointersAndRSetVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
@ -1365,9 +1332,7 @@ class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
if (Heap::InNewSpace(object)) {
ASSERT(Heap::InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
}
}
}
@ -226,9 +226,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: number of elements
// ecx: start of next object
__ mov(eax, Factory::fixed_array_map());
__ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
__ SmiTag(edx);
__ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
__ mov(Operand(edi, JSObject::kMapOffset), eax); // setup the map
__ mov(Operand(edi, Array::kLengthOffset), edx); // and length

// Initialize the fields to undefined.
// ebx: JSObject
@ -549,7 +548,6 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(ebx);
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
@ -754,15 +752,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);

// Initialize the FixedArray and fill it with holes. FixedArray length is
// Initialize the FixedArray and fill it with holes. FixedArray length is not
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
__ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
__ mov(FieldOperand(scratch1, JSObject::kMapOffset),
Factory::fixed_array_map());
__ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(initial_capacity)));
__ mov(FieldOperand(scratch1, Array::kLengthOffset),
Immediate(initial_capacity));

// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@ -849,22 +847,23 @@ static void AllocateJSArray(MacroAssembler* masm,
__ lea(elements_array, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);

// Initialize the fixed array. FixedArray length is stored as a smi.
// Initialize the fixed array. FixedArray length is not stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
__ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
ASSERT(kSmiTag == 0);
__ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
__ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);

// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
if (fill_with_hole) {
__ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(eax, Factory::the_hole_value());
@ -4201,6 +4201,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@ -4212,6 +4213,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {

// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0

@ -6163,11 +6165,11 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(below);
destination()->false_target()->Branch(less);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
obj.Unuse();
map.Unuse();
destination()->Split(below_equal);
destination()->Split(less_equal);
}


@ -6280,7 +6282,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
__ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
__ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
null.Branch(below);
null.Branch(less);

// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@ -6603,9 +6605,9 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
// Set length.
__ SmiUntag(ecx);
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
__ SmiUntag(ecx);
__ mov(edx, Immediate(Factory::the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
@ -6709,6 +6711,7 @@ void DeferredSearchCache::Generate() {

// Check if we could add new entry to cache.
__ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ SmiTag(ebx);
__ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ j(greater, &add_new_entry);

@ -6869,7 +6872,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
deferred->Branch(below);
deferred->Branch(less);
__ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
__ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(not_zero);
@ -6906,8 +6909,12 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// (or them and test against Smi mask.)

__ mov(tmp2.reg(), tmp1.reg());
__ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
__ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
__ CallStub(&recordWrite1);

RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
__ CallStub(&recordWrite2);

__ bind(&done);

deferred->BindExit();
@ -8185,11 +8192,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(below);
destination()->false_target()->Branch(less);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
answer.Unuse();
map.Unuse();
destination()->Split(below_equal);
destination()->Split(less_equal);
} else {
// Uncommon case: typeof testing against a string literal that is
// never returned from the typeof operator.
@ -8606,10 +8613,13 @@ Result CodeGenerator::EmitKeyedLoad() {
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());

result = elements;
// Use a fresh temporary for the index and later the loaded
// value.
result = allocator()->Allocate();
ASSERT(result.is_valid());

DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
new DeferredReferenceGetKeyedValue(result.reg(),
receiver.reg(),
key.reg());

@ -8641,17 +8651,20 @@ Result CodeGenerator::EmitKeyedLoad() {
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);

// Check that the key is within bounds.
__ cmp(key.reg(),
// Shift the key to get the actual index value and check that
// it is within bounds. Use unsigned comparison to handle negative keys.
__ mov(result.reg(), key.reg());
__ SmiUntag(result.reg());
__ cmp(result.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);

// Load and check that the result is not the hole.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
__ mov(result.reg(), Operand(elements.reg(),
key.reg(),
times_2,
result.reg(),
times_4,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@ -8736,7 +8749,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {

// Check whether it is possible to omit the write barrier. If the elements
// array is in new space or the value written is a smi we can safely update
// the elements array without write barrier.
// the elements array without updating the remembered set.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@ -9008,8 +9021,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {

// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
__ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));

// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
@ -10972,8 +10984,9 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ test(ecx, Operand(ecx));
__ j(zero, &done);

// Get the parameters pointer from the stack.
// Get the parameters pointer from the stack and untag the length.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ SmiUntag(ecx);

// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@ -10982,8 +10995,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
__ SmiUntag(ecx);

// Copy the fixed array slots.
Label loop;
@ -11112,7 +11123,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
__ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
__ cmp(edx, Operand(eax));
__ j(greater, &runtime);
@ -11356,7 +11366,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
__ shr(mask, 1); // Divide length by two (length is not a smi).
__ sub(Operand(mask), Immediate(1)); // Make mask.

// Calculate the entry in the number string cache. The hash value in the
@ -11447,6 +11457,12 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}


void RecordWriteStub::Generate(MacroAssembler* masm) {
masm->RecordWriteHelper(object_, addr_, scratch_);
masm->ret(0);
}


static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@ -11586,7 +11602,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, &first_non_object);
__ j(less, &first_non_object);

// Return non-zero (eax is not zero)
Label return_not_equal;
@ -11603,7 +11619,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));

__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(above_equal, &return_not_equal);
__ j(greater_equal, &return_not_equal);

// Check for oddballs: true, false, null, undefined.
__ cmp(ecx, ODDBALL_TYPE);
@ -12251,9 +12267,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
__ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow, not_taken);
__ j(less, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(above, &slow, not_taken);
__ j(greater, &slow, not_taken);

// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
@ -12281,9 +12297,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow, not_taken);
__ j(less, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(above, &slow, not_taken);
__ j(greater, &slow, not_taken);

// Register mapping:
// eax is object map.
@ -1083,6 +1083,42 @@ class NumberToStringStub: public CodeStub {
};


class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register addr, Register scratch)
: object_(object), addr_(addr), scratch_(scratch) { }

void Generate(MacroAssembler* masm);

private:
Register object_;
Register addr_;
Register scratch_;

#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
object_.code(), addr_.code(), scratch_.code());
}
#endif

// Minor key encoding in 12 bits. 4 bits for each of the three
// registers (object, address and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};

Major MajorKey() { return RecordWrite; }

int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
AddressBits::encode(addr_.code()) |
ScratchBits::encode(scratch_.code());
}
};


} } // namespace v8::internal

#endif // V8_IA32_CODEGEN_IA32_H_
@ -806,8 +806,8 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ Check(equal, "Unexpected declaration in current context.");
}
if (mode == Variable::CONST) {
__ mov(CodeGenerator::ContextOperand(esi, slot->index()),
Immediate(Factory::the_hole_value()));
__ mov(eax, Immediate(Factory::the_hole_value()));
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
// No write barrier since the hole value is in old space.
} else if (function != NULL) {
VisitForValue(function, kAccumulator);
@ -823,8 +823,10 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ push(esi);
__ push(Immediate(variable->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
ASSERT(mode == Variable::VAR ||
mode == Variable::CONST);
PropertyAttributes attr =
(mode == Variable::VAR) ? NONE : READ_ONLY;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@ -1009,6 +1011,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ SmiTag(eax);
__ push(eax); // Enumeration cache length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
@ -1018,6 +1021,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
__ push(eax);
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ SmiTag(eax);
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.

@ -1066,8 +1070,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_done);

// Generate code for going to the next element by incrementing the
// index (smi) stored on top of the stack.
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_target());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
__ jmp(&loop);
@ -2029,9 +2033,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ j(not_zero, if_false);
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, if_false);
__ j(less, if_false);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(below_equal, if_true);
__ j(less_equal, if_true);
__ jmp(if_false);

Apply(context_, if_true, if_false);
@ -2223,7 +2227,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset));
__ cmp(ebx, FIRST_JS_OBJECT_TYPE);
__ j(below, &null);
__ j(less, &null);

// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@ -304,7 +304,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;

@ -329,17 +329,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
__ mov(ebx, eax);
__ SmiUntag(ebx);
// Get the elements array of the object.
__ bind(&index_smi);
__ bind(&index_int);
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
// Check that the key (index) is within bounds.
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
__ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Fast case: Do the load.
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
__ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
__ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@ -351,10 +352,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_pixel_array);
// Check whether the elements is a pixel array.
// edx: receiver
// ebx: untagged index
// eax: key
// ecx: elements
__ mov(ebx, eax);
__ SmiUntag(ebx);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
@ -485,13 +485,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(ebx, String::kArrayIndexValueMask);
__ shr(ebx, String::kHashShift - kSmiTagSize);
__ mov(eax, ebx);
__ jmp(&index_smi);
__ and_(ebx, String::kArrayIndexHashMask);
__ shr(ebx, String::kHashShift);
__ jmp(&index_int);
}


@ -796,7 +792,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ mov(ebx, Operand(ecx));
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ j(below, &fast, taken);

// Slow case: call runtime.
@ -806,7 +804,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
// eax: value
// ecx: key (a smi)
// ecx: key
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
@ -842,11 +840,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ mov(ebx, ecx);
__ SmiUntag(ebx); // untag
__ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ j(above_equal, &slow, not_taken);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
Immediate(1 << kSmiTagSize));
__ jmp(&fast);

// Array case: Get the length and the elements array from the JS
@ -60,17 +60,49 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}

Label fast;

// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
and_(object, ~Page::kPageAlignmentMask);
Register page_start = object;

// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
and_(addr, Page::kPageAlignmentMask);
shr(addr, Page::kRegionSizeLog2);
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
sub(addr, Operand(page_start));
shr(addr, kObjectAlignmentBits);
Register pointer_offset = addr;

// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
cmp(pointer_offset, Page::kPageSize / kPointerSize);
j(less, &fast);

// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.

// Find the length of the large object (FixedArray).
mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
Register array_length = scratch;

// Extra remembered set starts right after the large object (a FixedArray), at
// page_start + kObjectStartOffset + objectSize
// where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
- Page::kRSetEndOffset));

// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bind(&fast);
bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}


@ -98,7 +130,7 @@ void MacroAssembler::InNewSpace(Register object,
}


// For page containing |object| mark region covering [object+offset] dirty.
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@ -110,8 +142,9 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));

// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
Label done;

// Skip barrier if writing a smi.
@ -127,19 +160,47 @@ void MacroAssembler::RecordWrite(Register object, int offset,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));

Register dst = scratch;
if (offset != 0) {
lea(dst, Operand(object, offset));
// We use optimized write barrier code if the word being written to is not in
// a large object chunk or is in the first page of a large object chunk.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
lea(value, Operand(object, offset));
and_(value, Page::kPageAlignmentMask);
shr(value, kPointerSizeLog2);

// Compute the page address from the heap object pointer, leave it in
// 'object'.
and_(object, ~Page::kPageAlignmentMask);

// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bts(Operand(object, Page::kRSetOffset), value);
} else {
// Array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
Register dst = scratch;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
RecordWriteHelper(object, dst, value);
} else {
RecordWriteStub stub(object, dst, value);
CallStub(&stub);
}
}
RecordWriteHelper(object, dst, value);

bind(&done);

@ -1323,7 +1384,6 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize));
@ -59,8 +59,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support

// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// Set the remebered set bit for an address which points into an
// object. RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@ -73,7 +73,7 @@ class MacroAssembler: public Assembler {
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);

// For page containing |object| mark region covering [object+offset] dirty.
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@ -1173,7 +1173,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);

if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;

// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@ -1183,6 +1183,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,

// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiTag(ecx);

// Check if we could survive without allocation.
__ cmp(eax, Operand(ecx));
@ -1200,16 +1201,17 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,

// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_write_barrier);
__ j(not_zero, &with_rset_update);

__ bind(&exit);
__ ret((argc + 1) * kPointerSize);

__ bind(&with_write_barrier);
__ bind(&with_rset_update);

__ InNewSpace(ebx, ecx, equal, &exit);

__ RecordWriteHelper(ebx, edx, ecx);
RecordWriteStub stub(ebx, edx, ecx);
__ CallStub(&stub);
__ ret((argc + 1) * kPointerSize);

__ bind(&attempt_to_grow_elements);
@ -1249,10 +1251,10 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,

// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
Immediate(kAllocationDelta));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);

// Elements are in new space, so write barrier is not required.
// Elements are in new space, so no remembered set updates are necessary.
__ ret((argc + 1) * kPointerSize);

__ bind(&call_builtin);
@ -84,6 +84,9 @@ void MarkCompactCollector::CollectGarbage() {
|
||||
UpdatePointers();
|
||||
|
||||
RelocateObjects();
|
||||
|
||||
RebuildRSets();
|
||||
|
||||
} else {
|
||||
SweepSpaces();
|
||||
}
|
||||
@ -118,6 +121,14 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
|
||||
compacting_collection_ = false;
|
||||
if (FLAG_collect_maps) CreateBackPointers();
|
||||
|
||||
#ifdef DEBUG
|
||||
if (compacting_collection_) {
|
||||
// We will write bookkeeping information to the remembered set area
|
||||
// starting now.
|
||||
Page::set_rset_state(Page::NOT_IN_USE);
|
||||
}
|
||||
#endif
|
||||
|
||||
PagedSpaces spaces;
|
||||
for (PagedSpace* space = spaces.next();
|
||||
space != NULL; space = spaces.next()) {
|
||||
@ -139,7 +150,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
|
||||
|
||||
void MarkCompactCollector::Finish() {
|
||||
#ifdef DEBUG
|
||||
ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
|
||||
ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
|
||||
state_ = IDLE;
|
||||
#endif
|
||||
// The stub cache is not traversed during GC; clear the cache to
|
||||
@ -233,8 +244,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
|
||||
}
|
||||
|
||||
// Since we don't have the object's start, it is impossible to update the
|
||||
// page dirty marks. Therefore, we only replace the string with its left
|
||||
// substring when page dirty marks do not change.
|
||||
// remembered set. Therefore, we only replace the string with its left
|
||||
// substring when the remembered set does not change.
|
||||
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
|
||||
if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
|
||||
|
||||
@ -765,7 +776,6 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
|
||||
Heap::lo_space()->FreeUnmarkedObjects();
|
||||
}
|
||||
|
||||
|
||||
// Safe to use during marking phase only.
|
||||
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
|
||||
MapWord metamap = object->map_word();
|
||||
@ -773,7 +783,6 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
|
||||
return metamap.ToMap()->instance_type() == MAP_TYPE;
|
||||
}
|
||||
|
||||
|
||||
void MarkCompactCollector::ClearNonLiveTransitions() {
|
||||
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
|
||||
// Iterate over the map space, setting map transitions that go from
|
||||
@ -1069,18 +1078,13 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
|
||||
// first word of object without any encoding. If object is dead we are writing
|
||||
// NULL as a forwarding address.
|
||||
// The second pass updates pointers to new space in all spaces. It is possible
|
||||
// to encounter pointers to dead objects during traversal of dirty regions we
|
||||
// should clear them to avoid encountering them during next dirty regions
|
||||
// iteration.
|
||||
static void MigrateObject(Address dst,
|
||||
Address src,
|
||||
int size,
|
||||
bool to_old_space) {
|
||||
if (to_old_space) {
|
||||
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
|
||||
} else {
|
||||
Heap::CopyBlock(dst, src, size);
|
||||
}
|
||||
// to encounter pointers to dead objects during traversal of remembered set for
|
||||
// map space because remembered set bits corresponding to dead maps are cleared
|
||||
// later during map space sweeping.
|
||||
static void MigrateObject(Address dst, Address src, int size) {
|
||||
Heap::CopyBlock(reinterpret_cast<Object**>(dst),
|
||||
reinterpret_cast<Object**>(src),
|
||||
size);
|
||||
|
||||
Memory::Address_at(src) = dst;
|
||||
}
|
||||
@ -1127,7 +1131,6 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Visitor for updating pointers from live objects in old spaces to new space.
|
||||
// It can encounter pointers to dead objects in new space when traversing map
|
||||
// space (see comment for MigrateObject).
|
||||
@ -1139,13 +1142,10 @@ static void UpdatePointerToNewGen(HeapObject** p) {
|
||||
|
||||
Address new_addr = Memory::Address_at(old_addr);
|
||||
|
||||
if (new_addr == NULL) {
|
||||
// We encountered pointer to a dead object. Clear it so we will
|
||||
// not visit it again during next iteration of dirty regions.
|
||||
*p = NULL;
|
||||
} else {
|
||||
*p = HeapObject::FromAddress(new_addr);
|
||||
}
|
||||
// Object pointed by *p is dead. Update is not required.
|
||||
if (new_addr == NULL) return;
|
||||
|
||||
*p = HeapObject::FromAddress(new_addr);
|
||||
}
|
||||
|
||||
|
||||
@ -1163,7 +1163,8 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
|
||||
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
|
||||
if (!result->IsFailure()) {
|
||||
HeapObject* target = HeapObject::cast(result);
|
||||
MigrateObject(target->address(), object->address(), object_size, true);
|
||||
MigrateObject(target->address(), object->address(), object_size);
|
||||
Heap::UpdateRSet(target);
|
||||
MarkCompactCollector::tracer()->
|
||||
increment_promoted_objects_size(object_size);
|
||||
return true;
|
||||
@ -1176,10 +1177,10 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
|
||||
result = target_space->AllocateRaw(object_size);
|
||||
if (!result->IsFailure()) {
|
||||
HeapObject* target = HeapObject::cast(result);
|
||||
MigrateObject(target->address(),
|
||||
object->address(),
|
||||
object_size,
|
||||
target_space == Heap::old_pointer_space());
|
||||
MigrateObject(target->address(), object->address(), object_size);
|
||||
if (target_space == Heap::old_pointer_space()) {
|
||||
Heap::UpdateRSet(target);
|
||||
}
|
||||
MarkCompactCollector::tracer()->
|
||||
increment_promoted_objects_size(object_size);
|
||||
return true;
|
||||
@ -1221,16 +1222,14 @@ static void SweepNewSpace(NewSpace* space) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Promotion failed. Just migrate object to another semispace.
|
||||
// Promotion either failed or not required.
|
||||
// Copy the content of the object.
|
||||
Object* target = space->AllocateRaw(size);
|
||||
|
||||
// Allocation cannot fail at this point: semispaces are of equal size.
|
||||
ASSERT(!target->IsFailure());
|
||||
|
||||
MigrateObject(HeapObject::cast(target)->address(),
|
||||
current,
|
||||
size,
|
||||
false);
|
||||
MigrateObject(HeapObject::cast(target)->address(), current, size);
|
||||
} else {
|
||||
size = object->Size();
|
||||
Memory::Address_at(current) = NULL;
|
||||
@ -1256,12 +1255,9 @@ static void SweepNewSpace(NewSpace* space) {
|
||||
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
|
||||
|
||||
// Update pointers in old spaces.
|
||||
Heap::IterateDirtyRegions(Heap::old_pointer_space(),
|
||||
&Heap::IteratePointersInDirtyRegion,
|
||||
&UpdatePointerToNewGen,
|
||||
Heap::WATERMARK_SHOULD_BE_VALID);
|
||||
|
||||
Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
|
||||
Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
|
||||
Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
|
||||
Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
|
||||
|
||||
// Update pointers from cells.
|
||||
HeapObjectIterator cell_iterator(Heap::cell_space());
|
||||
@ -1327,10 +1323,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
|
||||
MarkCompactCollector::tracer()->decrement_marked_count();
|
||||
|
||||
if (!is_previous_alive) { // Transition from free to live.
|
||||
dealloc(free_start,
|
||||
static_cast<int>(current - free_start),
|
||||
true,
|
||||
false);
|
||||
dealloc(free_start, static_cast<int>(current - free_start), true);
|
||||
is_previous_alive = true;
|
||||
}
|
||||
} else {
|
||||
@ -1360,18 +1353,8 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
|
||||
// without putting anything into free list.
|
||||
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
|
||||
if (size_in_bytes > 0) {
|
||||
dealloc(free_start, size_in_bytes, false, true);
|
||||
} else {
|
||||
#ifdef DEBUG
|
||||
MemoryAllocator::ZapBlock(p->ObjectAreaStart(),
Page::kObjectAreaSize);
#endif
dealloc(free_start, size_in_bytes, false);
}
} else {
#ifdef DEBUG
MemoryAllocator::ZapBlock(p->ObjectAreaStart(),
Page::kObjectAreaSize);
#endif
}
} else {
// This page is not empty. Sequence of empty pages ended on the previous
@ -1384,9 +1367,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If there is a free ending area on one of the previous pages we have
// deallocate that area and put it on the free list.
if (last_free_size > 0) {
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
dealloc(last_free_start, last_free_size, true, true);
dealloc(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
@ -1417,7 +1398,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
dealloc(last_free_start, last_free_size, false, true);
dealloc(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}

@ -1440,36 +1421,34 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {

void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
bool add_to_freelist) {
Heap::ClearRSetRange(start, size_in_bytes);
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}

void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
bool add_to_freelist) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}

void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
bool add_to_freelist) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}

void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
bool add_to_freelist) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
@ -1479,13 +1458,13 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,

void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
bool add_to_freelist) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
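A minimal sketch (not part of the diff) of the chunked-free pattern that DeallocateMapBlock and DeallocateCellBlock above both use; the callback name and signature here are hypothetical, the real code calls Heap::map_space()->Free / Heap::cell_space()->Free directly.

// Sketch only: walk a free block in fixed-size steps and release each
// chunk individually, as the map/cell deallocators do above.
static void FreeBlockInChunks(Address start, int size_in_bytes, int chunk_size,
                              void (*free_one)(Address)) {
  ASSERT(size_in_bytes % chunk_size == 0);  // block is a whole number of chunks
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += chunk_size) {
    free_one(a);  // e.g. put this chunk back on the space's free list
  }
}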
@ -1584,6 +1563,20 @@ class MapCompact {
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}

void FinishMapSpace() {
// Iterate through to space and finish move.
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
ASSERT(o != NULL);
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
ASSERT(map->IsMap());
Heap::UpdateRSet(map);
}
}

void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());

@ -1676,9 +1669,9 @@ class MapCompact {

ASSERT(Map::kSize % 4 == 0);

Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
map_to_evacuate->address(),
Map::kSize);
Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
reinterpret_cast<Object**>(map_to_evacuate->address()),
Map::kSize);

ASSERT(vacant_map->IsMap()); // Due to memcpy above.

@ -1763,12 +1756,6 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);

Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
&UpdatePointerToNewGen,
Heap::WATERMARK_SHOULD_BE_VALID);

int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
@ -1779,6 +1766,7 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();

map_compact.FinishMapSpace();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@ -2051,8 +2039,9 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
Page* forwarded_page = Page::FromAddress(first_forwarded);
int forwarded_offset = forwarded_page->Offset(first_forwarded);

// Find end of allocation in the page of first_forwarded.
int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
// Find end of allocation of in the page of first_forwarded.
Address mc_top = forwarded_page->mc_relocation_top;
int mc_top_offset = forwarded_page->Offset(mc_top);

// Check if current object's forward pointer is in the same page
// as the first live object's forwarding pointer
@ -2069,7 +2058,7 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
offset += Page::kObjectStartOffset;

ASSERT_PAGE_OFFSET(offset);
ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);

return next_page->OffsetToAddress(offset);
}
@ -2114,12 +2103,16 @@ void MarkCompactCollector::RelocateObjects() {
// Flip from and to spaces
Heap::new_space()->Flip();

Heap::new_space()->MCCommitRelocationInfo();

// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);

Heap::new_space()->MCCommitRelocationInfo();
#ifdef DEBUG
// It is safe to write to the remembered sets as remembered sets on a
// page-by-page basis after committing the m-c forwarding pointer.
Page::set_rset_state(Page::IN_USE);
#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
@ -2146,9 +2139,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {

if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
Map::kSize);
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
Map::kSize);
}

#ifdef DEBUG
@ -2205,13 +2198,9 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,

if (new_addr != old_addr) {
// Move contents.
if (space == Heap::old_data_space()) {
Heap::MoveBlock(new_addr, old_addr, obj_size);
} else {
Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
}

ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@ -2256,7 +2245,9 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {

if (new_addr != old_addr) {
// Move contents.
Heap::MoveBlock(new_addr, old_addr, obj_size);
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
}

HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@ -2292,13 +2283,9 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif

// New and old addresses cannot overlap.
if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
Heap::CopyBlock(new_addr, old_addr, obj_size);
} else {
Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);

#ifdef DEBUG
if (FLAG_gc_verbose) {
@ -2315,6 +2302,18 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}

// -------------------------------------------------------------------------
// Phase 5: rebuild remembered sets

void MarkCompactCollector::RebuildRSets() {
#ifdef DEBUG
ASSERT(state_ == RELOCATE_OBJECTS);
state_ = REBUILD_RSETS;
#endif
Heap::RebuildRSets();
}

void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {

@ -41,8 +41,7 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// no attempt to add area to free list is made.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
bool add_to_freelist);
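For illustration only: a function matching the reverted DeallocateFunction signature above would look like the following; the body simply mirrors DeallocateOldDataBlock from this same change.

// Sketch: any DeallocateFunction frees [start, start + size_in_bytes) in its
// space and optionally returns the area to the free list.
static void DeallocateExampleBlock(Address start,
                                   int size_in_bytes,
                                   bool add_to_freelist) {
  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}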

// Forward declarations.
@ -132,7 +131,8 @@ class MarkCompactCollector: public AllStatic {
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
RELOCATE_OBJECTS
RELOCATE_OBJECTS,
REBUILD_RSETS
};

// The current stage of the collector.
@ -269,22 +269,22 @@ class MarkCompactCollector: public AllStatic {
// written to their map word's offset in the inactive
// semispace.
//
// Bookkeeping data is written to the page header of
// Bookkeeping data is written to the remembered-set are of
// eached paged-space page that contains live objects after
// compaction:
//
// The allocation watermark field is used to track the
// relocation top address, the address of the first word
// after the end of the last live object in the page after
// compaction.
// The 3rd word of the page (first word of the remembered
// set) contains the relocation top address, the address of
// the first word after the end of the last live object in
// the page after compaction.
//
// The Page::mc_page_index field contains the zero-based index of the
// page in its space. This word is only used for map space pages, in
// The 4th word contains the zero-based index of the page in
// its space. This word is only used for map space pages, in
// order to encode the map addresses in 21 bits to free 11
// bits per map word for the forwarding address.
//
// The Page::mc_first_forwarded field contains the (nonencoded)
// forwarding address of the first live object in the page.
// The 5th word contains the (nonencoded) forwarding address
// of the first live object in the page.
//
// In both the new space and the paged spaces, a linked list
// of live regions is constructructed (linked through
@ -319,28 +319,23 @@ class MarkCompactCollector: public AllStatic {
// generation.
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
bool add_to_freelist);

static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
bool add_to_freelist);

static void DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
bool add_to_freelist);

static void DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
bool add_to_freelist);

static void DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
bool add_to_freelist);

// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
@ -354,7 +349,9 @@ class MarkCompactCollector: public AllStatic {
//
// After: All pointers in live objects, including encoded map
// pointers, are updated to point to their target's new
// location.
// location. The remembered set area of each paged-space
// page containing live objects still contains bookkeeping
// information.

friend class UpdatingVisitor; // helper for updating visited objects

@ -376,9 +373,13 @@ class MarkCompactCollector: public AllStatic {
// Phase 4: Relocating objects.
//
// Before: Pointers to live objects are updated to point to their
// target's new location.
// target's new location. The remembered set area of each
// paged-space page containing live objects still contains
// bookkeeping information.
//
// After: Objects have been moved to their new addresses.
// After: Objects have been moved to their new addresses. The
// remembered set area of each paged-space page containing
// live objects still contains bookkeeping information.

// Relocates objects in all spaces.
static void RelocateObjects();
@ -407,6 +408,17 @@ class MarkCompactCollector: public AllStatic {
// Copy a new object.
static int RelocateNewObject(HeapObject* obj);

// -----------------------------------------------------------------------
// Phase 5: Rebuilding remembered sets.
//
// Before: The heap is in a normal state except that remembered sets
// in the paged spaces are not correct.
//
// After: The heap is in a normal state.

// Rebuild remembered set in old and map spaces.
static void RebuildRSets();

#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes

@ -806,8 +806,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
CHECK(HasFastElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
CHECK_EQ(0, elements()->length());
}

@ -759,8 +759,7 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
ASSERT(mode == SKIP_WRITE_BARRIER); \
ASSERT(Heap::InNewSpace(object) || \
!Heap::InNewSpace(READ_FIELD(object, offset)) || \
Page::FromAddress(object->address())-> \
IsRegionDirty(object->address() + offset)); \
Page::IsRSetSet(object->address(), offset)); \
}

#define READ_DOUBLE_FIELD(p, offset) \
@ -1046,10 +1045,6 @@ Address MapWord::ToEncodedAddress() {
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}

void HeapObject::VerifySmiField(int offset) {
ASSERT(READ_FIELD(this, offset)->IsSmi());
}
#endif

@ -1069,7 +1064,7 @@ MapWord HeapObject::map_word() {

void HeapObject::set_map_word(MapWord map_word) {
// WRITE_FIELD does not invoke write barrier, but there is no need
// WRITE_FIELD does not update the remembered set, but there is no need
// here.
WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
@ -1167,16 +1162,16 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)

HeapObject* JSObject::elements() {
Array* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
// In the assert below Dictionary is covered under FixedArray.
ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
array->IsExternalArray());
return reinterpret_cast<HeapObject*>(array);
return reinterpret_cast<Array*>(array);
}

void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@ -1347,15 +1342,15 @@ bool JSObject::HasFastProperties() {
}

bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
bool Array::IndexFromObject(Object* object, uint32_t* index) {
if (object->IsSmi()) {
int value = Smi::cast(object)->value();
if (value < 0) return false;
*index = value;
return true;
}
if (IsHeapNumber()) {
double value = HeapNumber::cast(this)->value();
if (object->IsHeapNumber()) {
double value = HeapNumber::cast(object)->value();
uint32_t uint_value = static_cast<uint32_t>(value);
if (value == static_cast<double>(uint_value)) {
*index = uint_value;
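A plain-C++ restatement (not from the commit) of the double-to-index conversion that Array::IndexFromObject above performs for HeapNumbers: the value converts only if it round-trips through uint32_t.

// Sketch of the HeapNumber branch above, with the V8 types stripped away.
static bool IndexFromDouble(double value, uint32_t* index) {
  uint32_t uint_value = static_cast<uint32_t>(value);
  if (value == static_cast<double>(uint_value)) {  // integral and in range
    *index = uint_value;
    return true;
  }
  return false;
}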
@ -1670,11 +1665,7 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
}

SMI_ACCESSORS(FixedArray, length, kLengthOffset)
SMI_ACCESSORS(ByteArray, length, kLengthOffset)

INT_ACCESSORS(PixelArray, length, kLengthOffset)
INT_ACCESSORS(ExternalArray, length, kLengthOffset)
INT_ACCESSORS(Array, length, kLengthOffset)

SMI_ACCESSORS(String, length, kLengthOffset)
@ -1687,9 +1678,6 @@ uint32_t String::hash_field() {

void String::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
#endif
}

@ -2468,65 +2456,22 @@ BOOL_ACCESSORS(SharedFunctionInfo,
try_full_codegen,
kTryFullCodegen)

#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
INT_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
INT_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#else

#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
int holder::name() { \
int value = READ_INT_FIELD(this, offset); \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & kHeapObjectTag) == 0); \
return value >> 1; \
} \
void holder::set_##name(int value) { \
ASSERT(kHeapObjectTag == 1); \
ASSERT((value & 0xC0000000) == 0xC0000000 || \
(value & 0xC0000000) == 0x000000000); \
WRITE_INT_FIELD(this, \
offset, \
(value << 1) & ~kHeapObjectTag); \
}

#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
INT_ACCESSORS(holder, name, offset)

PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)

PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)

PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, end_position, kEndPositionOffset)

PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)

PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
#endif

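Not from the commit: a small sketch of the pseudo-smi trick the PSEUDO_SMI_ACCESSORS_LO macro above relies on — the first int of each pointer-aligned pair is stored shifted left by one (assuming kHeapObjectTag == 1), so the word's low bit stays clear and the GC never mistakes it for a tagged HeapObject pointer.

// Sketch of the encoding used for the "low" field of each pair.
static inline int EncodePseudoSmi(int value) {
  return (value << 1) & ~1;  // LSB cleared; value recoverable with >> 1
}
static inline int DecodePseudoSmi(int stored) {
  return stored >> 1;
}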
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@ -2840,7 +2785,7 @@ void JSRegExp::SetDataAt(int index, Object* value) {

JSObject::ElementsKind JSObject::GetElementsKind() {
HeapObject* array = elements();
Array* array = elements();
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
@ -2963,20 +2908,15 @@ NumberDictionary* JSObject::element_dictionary() {
}

bool String::IsHashFieldComputed(uint32_t field) {
return (field & kHashNotComputedMask) == 0;
}

bool String::HasHashCode() {
return IsHashFieldComputed(hash_field());
return (hash_field() & kHashComputedMask) != 0;
}

uint32_t String::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
if (field & kHashComputedMask) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
}
@ -3049,7 +2989,7 @@ uint32_t StringHasher::GetHash() {

bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
if (IsHashFieldComputed(field) && !(field & kIsArrayIndexMask)) return false;
if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}

@ -3173,7 +3113,7 @@ void Map::ClearCodeCache() {

void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
FixedArray* elts = FixedArray::cast(elements());
Array* elts = elements();
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid

@ -4683,7 +4683,7 @@ static inline uint32_t HashSequentialString(const schar* chars, int length) {

uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
ASSERT(!HasHashCode());
ASSERT(!(hash_field() & kHashComputedMask));

const int len = length();

@ -4702,7 +4702,7 @@ uint32_t String::ComputeAndSetHash() {
set_hash_field(field);

// Check the hash code is there.
ASSERT(HasHashCode());
ASSERT(hash_field() & kHashComputedMask);
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@ -4757,7 +4757,8 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
static inline uint32_t HashField(uint32_t hash,
bool is_array_index,
int length = -1) {
uint32_t result = (hash << String::kHashShift);
uint32_t result =
(hash << String::kHashShift) | String::kHashComputedMask;
if (is_array_index) {
// For array indexes mix the length into the hash as an array index could
// be zero.
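A sketch (not part of the diff) of how the reverted hash_field layout is read, using the kHashComputedMask, kIsArrayIndexMask and kHashShift constants that reappear in this change: bit 0 says the hash has been computed, bit 1 says the string can serve as an array index, and the hash value sits above kHashShift.

// Illustration only; the real accessors are String::HasHashCode and
// String::Hash shown above.
static inline bool HashIsComputed(uint32_t field) {
  return (field & String::kHashComputedMask) != 0;
}
static inline uint32_t HashValue(uint32_t field) {
  return field >> String::kHashShift;
}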
@ -5552,7 +5553,7 @@ Object* JSObject::SetElementsLength(Object* len) {
// General slow case.
if (len->IsNumber()) {
uint32_t length;
if (len->ToArrayIndex(&length)) {
if (Array::IndexFromObject(len, &length)) {
return SetSlowElements(len);
} else {
return ArrayLengthRangeError();
@ -5875,7 +5876,8 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&array_length));
if (index >= array_length) {
JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
@ -6026,7 +6028,8 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
if (ShouldConvertToFastElements()) {
uint32_t new_length = 0;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
&new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
@ -6057,7 +6060,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {

Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
uint32_t old_len = 0;
CHECK(length()->ToArrayIndex(&old_len));
CHECK(Array::IndexFromObject(length(), &old_len));
// Check to see if we need to update the length. For now, we make
// sure that the length stays within 32-bits (unsigned).
if (index >= old_len && index != 0xffffffff) {
@ -6351,7 +6354,7 @@ bool JSObject::ShouldConvertToFastElements() {
// fast elements.
uint32_t length = 0;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
} else {
length = dictionary->max_number_key();
}

src/objects.h
@ -54,28 +54,29 @@
// - JSGlobalObject
// - JSBuiltinsObject
// - JSGlobalProxy
// - JSValue
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - JSValue
// - Array
// - ByteArray
// - PixelArray
// - ExternalArray
// - ExternalByteArray
// - ExternalUnsignedByteArray
// - ExternalShortArray
// - ExternalUnsignedShortArray
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
// - FixedArray
// - DescriptorArray
// - HashTable
// - Dictionary
// - SymbolTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
// - Context
// - GlobalContext
// - JSFunctionResultCache
// - String
// - SeqString
// - SeqAsciiString
@ -675,10 +676,6 @@ class Object BASE_EMBEDDED {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype();

// Tries to convert an object to an array index. Returns true and sets
// the output parameter if it succeeds.
inline bool ToArrayIndex(uint32_t* index);

// Returns true if this is a JSValue containing a string and the index is
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
@ -1029,7 +1026,7 @@ class HeapObject: public Object {

// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
// Does not invoke write barrier, so should only be assigned to
// Does not update remembered sets, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);

@ -1049,7 +1046,6 @@ class HeapObject: public Object {
void HeapObjectPrint();
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);

void PrintHeader(const char* id);

@ -1154,7 +1150,7 @@ class JSObject: public HeapObject {
};

// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case and a Dictionary in the
// properties is a FixedArray in the fast case, and a Dictionary in the
// slow case.
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
@ -1162,9 +1158,9 @@ class JSObject: public HeapObject {
inline StringDictionary* property_dictionary(); // Gets slow properties.

// [elements]: The elements (properties with names that are integers).
// elements is a FixedArray in the fast case, a Dictionary in the slow
// case, and a PixelArray or ExternalArray in special cases.
DECL_ACCESSORS(elements, HeapObject)
// elements is a FixedArray in the fast case, and a Dictionary in the slow
// case or a PixelArray in a special case.
DECL_ACCESSORS(elements, Array) // Get and set fast elements.
inline void initialize_elements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
@ -1581,13 +1577,37 @@ class JSObject: public HeapObject {
};

// FixedArray describes fixed-sized arrays with element type Object*.
class FixedArray: public HeapObject {
// Abstract super class arrays. It provides length behavior.
class Array: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);

// Convert an object to an array index.
// Returns true if the conversion succeeded.
static inline bool IndexFromObject(Object* object, uint32_t* index);

// Layout descriptor.
static const int kLengthOffset = HeapObject::kHeaderSize;

protected:
// No code should use the Array class directly, only its subclasses.
// Use the kHeaderSize of the appropriate subclass, which may be aligned.
static const int kHeaderSize = kLengthOffset + kIntSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);

private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
};

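Illustration only: how a subclass picks up the Array base layout reintroduced above; the class name here is hypothetical, but FixedArray below does exactly this with Array::kAlignedSize.

// Sketch: a subclass's own fields start at the (possibly padded) end of the
// Array header.
class HypotheticalArray : public Array {
 public:
  static const int kDataOffset = Array::kAlignedSize;  // first subclass field
  static const int kHeaderSize = kDataOffset + kPointerSize;
};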
// FixedArray describes fixed sized arrays where element
// type is Object*.

class FixedArray: public Array {
public:

// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@ -1628,10 +1648,7 @@ class FixedArray: public HeapObject {
// Casting.
static inline FixedArray* cast(Object* obj);

// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;
static const int kHeaderSize = Array::kAlignedSize;

// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
@ -2330,12 +2347,8 @@ class JSFunctionResultCache: public FixedArray {
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
class ByteArray: public HeapObject {
class ByteArray: public Array {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);

// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@ -2344,7 +2357,7 @@ class ByteArray: public HeapObject {
inline int get_int(int index);

static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
return OBJECT_SIZE_ALIGN(kHeaderSize + length);
}
// We use byte arrays for free blocks in the heap. Given a desired size in
// bytes that is a multiple of the word size and big enough to hold a byte
@ -2372,12 +2385,9 @@ class ByteArray: public HeapObject {
void ByteArrayVerify();
#endif

// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = kLengthOffset + kPointerSize;

static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
// ByteArray headers are not quadword aligned.
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;

// Maximal memory consumption for a single ByteArray.
static const int kMaxSize = 512 * MB;
@ -2396,12 +2406,8 @@ class ByteArray: public HeapObject {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
class PixelArray: public HeapObject {
class PixelArray: public Array {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);

// [external_pointer]: The pointer to the external memory area backing this
// pixel array.
DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
@ -2426,11 +2432,9 @@ class PixelArray: public HeapObject {
static const int kMaxLength = 0x3fffffff;

// PixelArray headers are not quadword aligned.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kExternalPointerOffset =
POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);

private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
@ -2448,12 +2452,8 @@ class PixelArray: public HeapObject {
// Out-of-range values passed to the setter are converted via a C
// cast, not clamping. Out-of-range indices cause exceptions to be
// raised rather than being silently ignored.
class ExternalArray: public HeapObject {
class ExternalArray: public Array {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);

// [external_pointer]: The pointer to the external memory area backing this
// external array.
DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
@ -2465,11 +2465,9 @@ class ExternalArray: public HeapObject {
static const int kMaxLength = 0x3fffffff;

// ExternalArray headers are not quadword aligned.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kExternalPointerOffset =
POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);

private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
@ -3023,13 +3021,7 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_POINTER_ALIGN(kPadStart);

// Layout of pointer fields. Heap iteration code relies on them
// being continiously allocated.
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
static const int kPointerFieldsEndOffset =
Map::kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_SIZE_ALIGN(kPadStart);

// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@ -3341,64 +3333,23 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInferredNameOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
// Integer fields.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
static const int kNumLiteralsOffset =
kExpectedNofPropertiesOffset + kPointerSize;
static const int kStartPositionAndTypeOffset =
kNumLiteralsOffset + kPointerSize;
static const int kEndPositionOffset =
kStartPositionAndTypeOffset + kPointerSize;
static const int kFunctionTokenPositionOffset =
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kPointerSize;
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow interation without maps decoding during
// garbage collections.
// To avoid wasting space on 64-bit architectures we use
// the following trick: we group integer fields into pairs
// First integer in each pair is shifted left by 1.
// By doing this we guarantee that LSB of each kPointerSize aligned
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;

static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
static const int kNumLiteralsOffset =
kExpectedNofPropertiesOffset + kIntSize;

static const int kEndPositionOffset =
kNumLiteralsOffset + kIntSize;
static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
static const int kStartPositionAndTypeOffset =
kEndPositionOffset + kIntSize;

static const int kFunctionTokenPositionOffset =
kStartPositionAndTypeOffset + kIntSize;
kNumLiteralsOffset + kIntSize;
static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;

static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kIntSize;

// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;

#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);

private:
@ -4154,7 +4105,8 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHashFieldOffset = kLengthOffset + kPointerSize;
static const int kSize = kHashFieldOffset + kPointerSize;
static const int kSize = kHashFieldOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.

// Maximum number of characters to consider when trying to convert a string
// value into an array index.
@ -4173,7 +4125,7 @@ class String: public HeapObject {
// whether a hash code has been computed. If the hash code has been
// computed the 2nd bit tells whether the string can be used as an
// array index.
static const int kHashNotComputedMask = 1;
static const int kHashComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;

@ -4191,14 +4143,9 @@ class String: public HeapObject {
static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
static const int kArrayIndexValueBits =
kArrayIndexHashLengthShift - kHashShift;
static const int kArrayIndexValueMask =
((1 << kArrayIndexValueBits) - 1) << kHashShift;

// Value of empty hash field indicating that the hash is not computed.
static const int kEmptyHashField = kHashNotComputedMask;

// Value of hash field containing computed hash equal to zero.
static const int kZeroHash = 0;
static const int kEmptyHashField = 0;

// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
@ -4266,8 +4213,6 @@ class String: public HeapObject {
// mutates the ConsString and might return a failure.
Object* SlowTryFlatten(PretenureFlag pretenure);

static inline bool IsHashFieldComputed(uint32_t field);

// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
@ -4317,7 +4262,7 @@ class SeqAsciiString: public SeqString {

// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
}

// Layout description.
@ -4369,7 +4314,7 @@ class SeqTwoByteString: public SeqString {

// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
}

// Layout description.

@ -291,7 +291,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE);
} else if (key->ToArrayIndex(&element_index)) {
} else if (Array::IndexFromObject(*key, &element_index)) {
// Array index (uint32).
result = SetElement(boilerplate, element_index, value);
} else {
@ -1583,7 +1583,7 @@ static Object* Runtime_SetCode(Arguments args) {

static Object* CharCodeAt(String* subject, Object* index) {
uint32_t i = 0;
if (!index->ToArrayIndex(&i)) return Heap::nan_value();
if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
@ -1599,7 +1599,7 @@ static Object* CharCodeAt(String* subject, Object* index) {

static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (char_code->ToArrayIndex(&code)) {
if (Array::IndexFromObject(char_code, &code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
@ -2780,7 +2780,7 @@ static Object* Runtime_StringIndexOf(Arguments args) {

Object* index = args[2];
uint32_t start_index;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);

RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
int position = Runtime::StringMatch(sub, pat, start_index);
@ -2830,7 +2830,7 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {

Object* index = args[2];
uint32_t start_index;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);

uint32_t pat_length = pat->length();
uint32_t sub_length = sub->length();
@ -3657,7 +3657,7 @@ Object* Runtime::GetObjectProperty(Handle<Object> object, Handle<Object> key) {

// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
if (Array::IndexFromObject(*key, &index)) {
return GetElementOrCharAt(object, index);
}

@ -3843,7 +3843,7 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,

// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
if (Array::IndexFromObject(*key, &index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@ -3895,7 +3895,7 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,

// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
if (Array::IndexFromObject(*key, &index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@ -3942,7 +3942,7 @@ Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,

// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
if (Array::IndexFromObject(*key, &index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@ -4355,7 +4355,7 @@ static Object* Runtime_GetArgumentsProperty(Arguments args) {
// Try to convert the key to an index. If successful and within
// index return the the argument from the frame.
uint32_t index;
if (args[0]->ToArrayIndex(&index) && index < n) {
if (Array::IndexFromObject(args[0], &index) && index < n) {
return frame->GetParameter(index);
}

@ -6457,8 +6457,8 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
if (obj->IsFailure()) return obj;

AssertNoAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
array->set_map(Heap::fixed_array_map());
reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
FixedArray* array = FixedArray::cast(obj);
array->set_length(length);

WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@ -7747,8 +7747,8 @@ static Object* Runtime_SwapElements(Arguments args) {
Handle<Object> key2 = args.at<Object>(2);

uint32_t index1, index2;
if (!key1->ToArrayIndex(&index1)
|| !key2->ToArrayIndex(&index2)) {
if (!Array::IndexFromObject(*key1, &index1)
|| !Array::IndexFromObject(*key2, &index2)) {
return Top::ThrowIllegalOperation();
}

@ -7779,19 +7779,17 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
uint32_t index;
if (!key->ToArrayIndex(&index) || index >= length) {
if (!Array::IndexFromObject(key, &index) || index >= length) {
// Zap invalid keys.
keys->set_undefined(i);
}
}
return *Factory::NewJSArrayWithElements(keys);
} else {
ASSERT(array->HasFastElements());
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
uint32_t actual_length =
static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
Factory::NewNumber(static_cast<double>(min_length));

src/spaces-inl.h
@ -66,172 +66,99 @@ Address Page::AllocationTop() {
}

Address Page::AllocationWatermark() {
PagedSpace* owner = MemoryAllocator::PageOwner(this);
if (this == owner->AllocationTopPage()) {
return owner->top();
void Page::ClearRSet() {
// This method can be called in all rset states.
memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}

// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
// For a 64-bit address, if it is:
// | page address | words(5) | bit offset(5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.

Address Page::ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask) {
ASSERT(Page::is_rset_in_use());

Page* page = Page::FromAddress(address);
uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
kPointerSizeLog2);
*bitmask = 1 << (bit_offset % kBitsPerInt);

Address rset_address =
page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
// The remembered set address is either in the normal remembered set range
// of a page or else we have a large object page.
ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
|| page->IsLargeObjectPage());

if (rset_address >= page->RSetEnd()) {
// We have a large object page, and the remembered set address is actually
// past the end of the object.

// The first part of the remembered set is still located at the start of
// the page, but anything after kRSetEndOffset must be relocated to after
// the large object, i.e. after
// (page->ObjectAreaStart() + object size)
// We do that by adding the difference between the normal RSet's end and
// the object's end.
ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
int fixedarray_length =
FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+ Array::kLengthOffset));
rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
}
return address() + AllocationWatermarkOffset();
return rset_address;
}

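Not from the commit: the address arithmetic in Page::ComputeRSetBitPosition above, restated as a sketch for the 32-bit layout described in the comment (the real code also adds kRSetOffset, which matters on 64-bit targets).

// Sketch: one rset bit per pointer-sized slot; the bit's word lives
// (bit_offset / kBitsPerInt) 32-bit words into the page's rset.
static inline void RSetPosition(Address page_address, uint32_t slot_offset,
                                Address* rset_word, uint32_t* bitmask) {
  uint32_t bit_offset = slot_offset >> kPointerSizeLog2;
  *bitmask = 1 << (bit_offset % kBitsPerInt);
  *rset_word = page_address + (bit_offset / kBitsPerInt) * kIntSize;
}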
uint32_t Page::AllocationWatermarkOffset() {
return (flags_ & kAllocationWatermarkOffsetMask) >>
kAllocationWatermarkOffsetShift;
}

void Page::SetAllocationWatermark(Address allocation_watermark) {
if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
// When iterating intergenerational references during scavenge
// we might decide to promote an encountered young object.
// We will allocate a space for such an object and put it
// into the promotion queue to process it later.
// If space for object was allocated somewhere beyond allocation
// watermark this might cause garbage pointers to appear under allocation
// watermark. To avoid visiting them during dirty regions iteration
// which might be still in progress we store a valid allocation watermark
// value and mark this page as having an invalid watermark.
SetCachedAllocationWatermark(AllocationWatermark());
InvalidateWatermark(true);
}

flags_ = (flags_ & kFlagsMask) |
Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
ASSERT(AllocationWatermarkOffset()
== static_cast<uint32_t>(Offset(allocation_watermark)));
}

void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
mc_first_forwarded = allocation_watermark;
}

Address Page::CachedAllocationWatermark() {
return mc_first_forwarded;
}

uint32_t Page::GetRegionMarks() {
return dirty_regions_;
}

void Page::SetRegionMarks(uint32_t marks) {
dirty_regions_ = marks;
}

int Page::GetRegionNumberForAddress(Address addr) {
// Each page is divided into 256 byte regions. Each region has a corresponding
// dirty mark bit in the page header. Region can contain intergenerational
// references iff its dirty mark is set.
// A normal 8K page contains exactly 32 regions so all region marks fit
// into 32-bit integer field. To calculate a region number we just divide
// offset inside page by region size.
// A large page can contain more then 32 regions. But we want to avoid
// additional write barrier code for distinguishing between large and normal
// pages so we just ignore the fact that addr points into a large page and
// calculate region number as if addr pointed into a normal 8K page. This way
// we get a region number modulo 32 so for large pages several regions might
// be mapped to a single dirty mark.
ASSERT_PAGE_ALIGNED(this->address());
STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);

// We are using masking with kPageAlignmentMask instead of Page::Offset()
// to get an offset to the beginning of 8K page containing addr not to the
// beginning of actual page which can be bigger then 8K.
return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
}

uint32_t Page::GetRegionMaskForAddress(Address addr) {
return 1 << GetRegionNumberForAddress(addr);
}

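A sketch (not part of the diff) of the region bookkeeping described in GetRegionNumberForAddress above: 256-byte regions, one dirty bit each, with the index taken modulo 32 via the page alignment mask (so kRegionSizeLog2 is 8 for 256-byte regions).

// Sketch: map an address to its dirty-region bit within the page's 32-bit
// region mark word.
static inline int RegionNumber(Address addr) {
  return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
}
static inline uint32_t RegionMask(Address addr) {
  return 1u << RegionNumber(addr);
}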
|
||||
|
||||
void Page::MarkRegionDirty(Address address) {
|
||||
SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
|
||||
}
|
||||
|
||||
|
||||
bool Page::IsRegionDirty(Address address) {
|
||||
return GetRegionMarks() & GetRegionMaskForAddress(address);
|
||||
}
|
||||
|
||||
|
||||
void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
|
||||
int rstart = GetRegionNumberForAddress(start);
|
||||
int rend = GetRegionNumberForAddress(end);
|
||||
|
||||
if (reaches_limit) {
|
||||
end += 1;
|
||||
}
|
||||
|
||||
if ((rend - rstart) == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
void Page::SetRSet(Address address, int offset) {
|
||||
uint32_t bitmask = 0;
|
||||
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
|
||||
Memory::uint32_at(rset_address) |= bitmask;
|
||||
|
||||
if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
|
||||
|| (start == ObjectAreaStart())) {
|
||||
// First region is fully covered
|
||||
bitmask = 1 << rstart;
|
||||
}
|
||||
|
||||
while (++rstart < rend) {
|
||||
bitmask |= 1 << rstart;
|
||||
}
|
||||
|
||||
if (bitmask) {
|
||||
SetRegionMarks(GetRegionMarks() & ~bitmask);
|
||||
}
|
||||
ASSERT(IsRSetSet(address, offset));
|
||||
}
|
||||
|
||||
|
||||
void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
|
||||
watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
|
||||
// Clears the corresponding remembered set bit for a given address.
|
||||
void Page::UnsetRSet(Address address, int offset) {
|
||||
uint32_t bitmask = 0;
|
||||
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
|
||||
Memory::uint32_at(rset_address) &= ~bitmask;
|
||||
|
||||
ASSERT(!IsRSetSet(address, offset));
|
||||
}
|
||||
|
||||
|
||||
bool Page::IsWatermarkValid() {
|
||||
return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
|
||||
}
|
||||
|
||||
|
||||
void Page::InvalidateWatermark(bool value) {
|
||||
if (value) {
|
||||
flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
|
||||
} else {
|
||||
flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
|
||||
(watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
|
||||
}
|
||||
|
||||
ASSERT(IsWatermarkValid() == !value);
|
||||
bool Page::IsRSetSet(Address address, int offset) {
|
||||
uint32_t bitmask = 0;
|
||||
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
|
||||
return (Memory::uint32_at(rset_address) & bitmask) != 0;
|
||||
}
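// --- Illustrative sketch (not part of the original commit) ---
// The watermark-invalidation trick used above, in isolation: a page's
// watermark is "valid" iff its WATERMARK_INVALIDATED bit differs from a
// global parity value, so flipping the global value revalidates every page at
// once without touching the pages. Names are local to this sketch.
#include <stdint.h>

static const intptr_t kSketchWatermarkInvalidated = 1 << 2;
static intptr_t sketch_invalidated_mark = kSketchWatermarkInvalidated;

struct SketchPage { intptr_t flags; };

static void SketchInvalidate(SketchPage* p) {
  p->flags = (p->flags & ~kSketchWatermarkInvalidated) | sketch_invalidated_mark;
}

static bool SketchIsValid(const SketchPage* p) {
  return (p->flags & kSketchWatermarkInvalidated) != sketch_invalidated_mark;
}

static void SketchFlip() {  // done once at the start of the next scavenge
  sketch_invalidated_mark ^= kSketchWatermarkInvalidated;
}
// After SketchInvalidate(p), SketchIsValid(p) is false; after SketchFlip()
// it is true again, with no per-page work.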
|
||||
|
||||
|
||||
bool Page::GetPageFlag(PageFlag flag) {
|
||||
return (flags_ & flag) != 0;
|
||||
return (flags & flag) != 0;
|
||||
}
|
||||
|
||||
|
||||
void Page::SetPageFlag(PageFlag flag, bool value) {
|
||||
if (value) {
|
||||
flags_ |= flag;
|
||||
flags |= flag;
|
||||
} else {
|
||||
flags_ &= ~flag;
|
||||
flags &= ~flag;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Page::ClearPageFlags() {
|
||||
flags_ = 0;
|
||||
}
|
||||
|
||||
|
||||
bool Page::WasInUseBeforeMC() {
|
||||
return GetPageFlag(WAS_IN_USE_BEFORE_MC);
|
||||
}
|
||||
@ -416,6 +343,14 @@ HeapObject* LargeObjectChunk::GetObject() {
|
||||
// -----------------------------------------------------------------------------
// LargeObjectSpace

int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
  int extra_rset_bits =
      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
              kBitsPerInt);
  return extra_rset_bits / kBitsPerByte;
}
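// --- Illustrative sketch (not part of the original commit) ---
// A standalone version of the computation above, assuming a 32-bit target
// (4-byte pointers, 32-bit ints) and an 8K page with a 256-byte header, i.e.
// an object area of 8192 - 256 = 7936 bytes. Names are local to this sketch.
static int SketchRoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

static int SketchExtraRSetBytesFor(int object_size) {
  const int kSketchObjectAreaSize = 8192 - 256;
  const int kSketchPointerSize = 4;
  const int kSketchBitsPerInt = 32;
  const int kSketchBitsPerByte = 8;
  // One remembered-set bit per pointer-sized word beyond the normal object
  // area, rounded up to a whole 32-bit remembered-set word.
  int extra_rset_bits =
      SketchRoundUp((object_size - kSketchObjectAreaSize) / kSketchPointerSize,
                    kSketchBitsPerInt);
  return extra_rset_bits / kSketchBitsPerByte;
}
// Example: a 16384-byte fixed array has 8448 bytes beyond the object area,
// i.e. 2112 pointer slots, i.e. 2112 bits == 264 extra remembered-set bytes.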
|
||||
|
||||
|
||||
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
|
||||
AllocationInfo* alloc_info) {
|
||||
Address new_top = alloc_info->top + size_in_bytes;
|
||||
|
411
src/spaces.cc
@ -41,7 +41,6 @@ namespace internal {
|
||||
&& (info).top <= (space).high() \
|
||||
&& (info).limit == (space).high())
|
||||
|
||||
intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// HeapObjectIterator
|
||||
@ -139,6 +138,13 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
|
||||
}
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Page
|
||||
|
||||
#ifdef DEBUG
|
||||
Page::RSetState Page::rset_state_ = Page::IN_USE;
|
||||
#endif
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// CodeRange
|
||||
|
||||
@ -518,10 +524,7 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
|
||||
for (int i = 0; i < pages_in_chunk; i++) {
|
||||
Page* p = Page::FromAddress(page_addr);
|
||||
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
|
||||
p->InvalidateWatermark(true);
|
||||
p->SetIsLargeObjectPage(false);
|
||||
p->SetAllocationWatermark(p->ObjectAreaStart());
|
||||
p->SetCachedAllocationWatermark(p->ObjectAreaStart());
|
||||
page_addr += Page::kPageSize;
|
||||
}
|
||||
|
||||
@ -678,7 +681,6 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
|
||||
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
|
||||
page_addr += Page::kPageSize;
|
||||
|
||||
p->InvalidateWatermark(true);
|
||||
if (p->WasInUseBeforeMC()) {
|
||||
*last_page_in_use = p;
|
||||
}
|
||||
@ -742,10 +744,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
|
||||
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
|
||||
ASSERT(Capacity() <= max_capacity_);
|
||||
|
||||
// Sequentially clear region marks in the newly allocated
|
||||
// Sequentially initialize remembered sets in the newly allocated
|
||||
// pages and cache the current last page in the space.
|
||||
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
|
||||
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
p->ClearRSet();
|
||||
last_page_ = p;
|
||||
}
|
||||
|
||||
@ -792,10 +794,10 @@ void PagedSpace::Unprotect() {
|
||||
#endif
|
||||
|
||||
|
||||
void PagedSpace::MarkAllPagesClean() {
|
||||
void PagedSpace::ClearRSet() {
|
||||
PageIterator it(this, PageIterator::ALL_PAGES);
|
||||
while (it.has_next()) {
|
||||
it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
it.next()->ClearRSet();
|
||||
}
|
||||
}
|
||||
|
||||
@ -898,8 +900,7 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
|
||||
// of forwarding addresses is as an offset in terms of live bytes, so we
|
||||
// need quick access to the allocation top of each page to decode
|
||||
// forwarding addresses.
|
||||
current_page->SetAllocationWatermark(mc_forwarding_info_.top);
|
||||
current_page->next_page()->InvalidateWatermark(true);
|
||||
current_page->mc_relocation_top = mc_forwarding_info_.top;
|
||||
SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
|
||||
return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
|
||||
}
|
||||
@ -927,10 +928,10 @@ bool PagedSpace::Expand(Page* last_page) {
|
||||
|
||||
MemoryAllocator::SetNextPage(last_page, p);
|
||||
|
||||
// Sequentially clear region marks of new pages and cache the
// Sequentially clear remembered set of new pages and cache the
|
||||
// new last page in the space.
|
||||
while (p->is_valid()) {
|
||||
p->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
p->ClearRSet();
|
||||
last_page_ = p;
|
||||
p = p->next_page();
|
||||
}
|
||||
@ -1029,11 +1030,16 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
|
||||
if (above_allocation_top) {
|
||||
// We don't care what's above the allocation top.
|
||||
} else {
|
||||
// Unless this is the last page in the space containing allocated
|
||||
// objects, the allocation top should be at a constant offset from the
|
||||
// object area end.
|
||||
Address top = current_page->AllocationTop();
|
||||
if (current_page == top_page) {
|
||||
ASSERT(top == allocation_info_.top);
|
||||
// The next page will be above the allocation top.
|
||||
above_allocation_top = true;
|
||||
} else {
|
||||
ASSERT(top == PageAllocationLimit(current_page));
|
||||
}
|
||||
|
||||
// It should be packed with objects from the bottom to the top.
|
||||
@ -1054,8 +1060,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
|
||||
object->Verify();
|
||||
|
||||
// All the interior pointers should be contained in the heap and
|
||||
// have page regions covering intergenerational references should be
|
||||
// marked dirty.
|
||||
// have their remembered set bits set if required as determined
|
||||
// by the visitor.
|
||||
int size = object->Size();
|
||||
object->IterateBody(map->instance_type(), size, visitor);
|
||||
|
||||
@ -1628,7 +1634,7 @@ void FreeListNode::set_size(int size_in_bytes) {
|
||||
// If the block is too small (eg, one or two words) to hold both a size
|
||||
// field and a next pointer, we give it a filler map that gives it the
|
||||
// correct size.
|
||||
if (size_in_bytes > ByteArray::kHeaderSize) {
|
||||
if (size_in_bytes > ByteArray::kAlignedSize) {
|
||||
set_map(Heap::raw_unchecked_byte_array_map());
|
||||
// Can't use ByteArray::cast because it fails during deserialization.
|
||||
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
|
||||
@ -1901,14 +1907,15 @@ void OldSpace::MCCommitRelocationInfo() {
|
||||
Page* p = it.next();
|
||||
// Space below the relocation pointer is allocated.
|
||||
computed_size +=
|
||||
static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
|
||||
static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
|
||||
if (it.has_next()) {
|
||||
// Free the space at the top of the page.
|
||||
// Free the space at the top of the page. We cannot use
|
||||
// p->mc_relocation_top after the call to Free (because Free will clear
|
||||
// remembered set bits).
|
||||
int extra_size =
|
||||
static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
|
||||
static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
|
||||
if (extra_size > 0) {
|
||||
int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
|
||||
extra_size);
|
||||
int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
|
||||
// The bytes we have just "freed" to add to the free list were
|
||||
// already accounted as available.
|
||||
accounting_stats_.WasteBytes(wasted_bytes);
|
||||
@ -1956,10 +1963,7 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
|
||||
|
||||
// Clean them up.
|
||||
do {
|
||||
first->InvalidateWatermark(true);
|
||||
first->SetAllocationWatermark(first->ObjectAreaStart());
|
||||
first->SetCachedAllocationWatermark(first->ObjectAreaStart());
|
||||
first->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
first->ClearRSet();
|
||||
first = first->next_page();
|
||||
} while (first != NULL);
|
||||
|
||||
@ -1999,7 +2003,6 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
|
||||
// Current allocation top points to a page which is now in the middle
|
||||
// of page list. We should move allocation top forward to the new last
|
||||
// used page so various object iterators will continue to work properly.
|
||||
last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
|
||||
|
||||
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
|
||||
last_in_use->AllocationTop());
|
||||
@ -2032,7 +2035,6 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
|
||||
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
|
||||
p->ObjectAreaStart());
|
||||
|
||||
p->SetAllocationWatermark(p->ObjectAreaStart());
|
||||
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
|
||||
}
|
||||
}
|
||||
@ -2064,7 +2066,6 @@ bool PagedSpace::ReserveSpace(int bytes) {
|
||||
if (!reserved_page->is_valid()) return false;
|
||||
}
|
||||
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
|
||||
TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
|
||||
SetAllocationInfo(&allocation_info_,
|
||||
TopPageOf(allocation_info_)->next_page());
|
||||
return true;
|
||||
@ -2099,15 +2100,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
|
||||
accounting_stats_.WasteBytes(wasted_bytes);
|
||||
if (!result->IsFailure()) {
|
||||
accounting_stats_.AllocateBytes(size_in_bytes);
|
||||
|
||||
HeapObject* obj = HeapObject::cast(result);
|
||||
Page* p = Page::FromAddress(obj->address());
|
||||
|
||||
if (obj->address() >= p->AllocationWatermark()) {
|
||||
p->SetAllocationWatermark(obj->address() + size_in_bytes);
|
||||
}
|
||||
|
||||
return obj;
|
||||
return HeapObject::cast(result);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2130,7 +2123,6 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
|
||||
|
||||
|
||||
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
|
||||
current_page->SetAllocationWatermark(allocation_info_.top);
|
||||
int free_size =
|
||||
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
|
||||
if (free_size > 0) {
|
||||
@ -2141,7 +2133,6 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
|
||||
|
||||
|
||||
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
|
||||
current_page->SetAllocationWatermark(allocation_info_.top);
|
||||
int free_size =
|
||||
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
|
||||
// In the fixed space free list all the free list items have the right size.
|
||||
@ -2161,7 +2152,6 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
|
||||
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
|
||||
int size_in_bytes) {
|
||||
ASSERT(current_page->next_page()->is_valid());
|
||||
current_page->next_page()->InvalidateWatermark(true);
|
||||
PutRestOfCurrentPageOnFreeList(current_page);
|
||||
SetAllocationInfo(&allocation_info_, current_page->next_page());
|
||||
return AllocateLinearly(&allocation_info_, size_in_bytes);
|
||||
@ -2306,12 +2296,160 @@ void OldSpace::ReportStatistics() {
|
||||
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
|
||||
Capacity(), Waste(), Available(), pct);
|
||||
|
||||
// Report remembered set statistics.
|
||||
int rset_marked_pointers = 0;
|
||||
int rset_marked_arrays = 0;
|
||||
int rset_marked_array_elements = 0;
|
||||
int cross_gen_pointers = 0;
|
||||
int cross_gen_array_elements = 0;
|
||||
|
||||
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
|
||||
while (page_it.has_next()) {
|
||||
Page* p = page_it.next();
|
||||
|
||||
for (Address rset_addr = p->RSetStart();
|
||||
rset_addr < p->RSetEnd();
|
||||
rset_addr += kIntSize) {
|
||||
int rset = Memory::int_at(rset_addr);
|
||||
if (rset != 0) {
|
||||
// Bits were set
|
||||
int intoff =
|
||||
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
|
||||
int bitoff = 0;
|
||||
for (; bitoff < kBitsPerInt; ++bitoff) {
|
||||
if ((rset & (1 << bitoff)) != 0) {
|
||||
int bitpos = intoff*kBitsPerByte + bitoff;
|
||||
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
|
||||
Object** obj = reinterpret_cast<Object**>(slot);
|
||||
if (*obj == Heap::raw_unchecked_fixed_array_map()) {
|
||||
rset_marked_arrays++;
|
||||
FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
|
||||
|
||||
rset_marked_array_elements += fa->length();
|
||||
// Manually inline FixedArray::IterateBody
|
||||
Address elm_start = slot + FixedArray::kHeaderSize;
|
||||
Address elm_stop = elm_start + fa->length() * kPointerSize;
|
||||
for (Address elm_addr = elm_start;
|
||||
elm_addr < elm_stop; elm_addr += kPointerSize) {
|
||||
// Filter non-heap-object pointers
|
||||
Object** elm_p = reinterpret_cast<Object**>(elm_addr);
|
||||
if (Heap::InNewSpace(*elm_p))
|
||||
cross_gen_array_elements++;
|
||||
}
|
||||
} else {
|
||||
rset_marked_pointers++;
|
||||
if (Heap::InNewSpace(*obj))
|
||||
cross_gen_pointers++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pct = rset_marked_pointers == 0 ?
|
||||
0 : cross_gen_pointers * 100 / rset_marked_pointers;
|
||||
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
|
||||
rset_marked_pointers, cross_gen_pointers, pct);
|
||||
PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
|
||||
PrintF(" elements %d, ", rset_marked_array_elements);
|
||||
pct = rset_marked_array_elements == 0 ? 0
|
||||
: cross_gen_array_elements * 100 / rset_marked_array_elements;
|
||||
PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
|
||||
PrintF(" total rset-marked bits %d\n",
|
||||
(rset_marked_pointers + rset_marked_arrays));
|
||||
pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
|
||||
: (cross_gen_pointers + cross_gen_array_elements) * 100 /
|
||||
(rset_marked_pointers + rset_marked_array_elements);
|
||||
PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
|
||||
(rset_marked_pointers + rset_marked_array_elements),
|
||||
(cross_gen_pointers + cross_gen_array_elements),
|
||||
pct);
|
||||
|
||||
ClearHistograms();
|
||||
HeapObjectIterator obj_it(this);
|
||||
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
|
||||
CollectHistogramInfo(obj);
|
||||
ReportHistogram(true);
|
||||
}
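// --- Illustrative sketch (not part of the original commit) ---
// How the statistics loop above maps a set remembered-set bit back to a slot,
// assuming a 32-bit target (kObjectAlignmentBits == 2, i.e. 4-byte words) and
// kRSetOffset == 0. rset_byte_offset is the byte offset of the remembered-set
// word inside the page's remembered-set area; bit is the bit index within it.
static inline int SketchSlotPageOffset(int rset_byte_offset, int bit) {
  const int kSketchBitsPerByte = 8;
  const int kSketchObjectAlignmentBits = 2;  // log2 of the 4-byte pointer size
  int bitpos = rset_byte_offset * kSketchBitsPerByte + bit;
  // Each remembered-set bit covers one pointer-aligned word of the page.
  return bitpos << kSketchObjectAlignmentBits;
}
// Example: remembered-set byte 8, bit 0 gives bitpos 64 and page offset 256,
// i.e. the very first word of the object area.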
|
||||
|
||||
|
||||
// Dump the range of remembered set words between [start, end) corresponding
|
||||
// to the pointers starting at object_p. The allocation_top is an object
|
||||
// pointer which should not be read past. This is important for large object
|
||||
// pages, where some bits in the remembered set range do not correspond to
|
||||
// allocated addresses.
|
||||
static void PrintRSetRange(Address start, Address end, Object** object_p,
|
||||
Address allocation_top) {
|
||||
Address rset_address = start;
|
||||
|
||||
// If the range starts on an odd numbered word (eg, for large object extra
|
||||
// remembered set ranges), print some spaces.
|
||||
if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
|
||||
PrintF(" ");
|
||||
}
|
||||
|
||||
// Loop over all the words in the range.
|
||||
while (rset_address < end) {
|
||||
uint32_t rset_word = Memory::uint32_at(rset_address);
|
||||
int bit_position = 0;
|
||||
|
||||
// Loop over all the bits in the word.
|
||||
while (bit_position < kBitsPerInt) {
|
||||
if (object_p == reinterpret_cast<Object**>(allocation_top)) {
|
||||
// Print a bar at the allocation pointer.
|
||||
PrintF("|");
|
||||
} else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
|
||||
// Do not dereference object_p past the allocation pointer.
|
||||
PrintF("#");
|
||||
} else if ((rset_word & (1 << bit_position)) == 0) {
|
||||
// Print a dot for zero bits.
|
||||
PrintF(".");
|
||||
} else if (Heap::InNewSpace(*object_p)) {
|
||||
// Print an X for one bits for pointers to new space.
|
||||
PrintF("X");
|
||||
} else {
|
||||
// Print a circle for one bits for pointers to old space.
|
||||
PrintF("o");
|
||||
}
|
||||
|
||||
// Print a space after every 8th bit except the last.
|
||||
if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
|
||||
PrintF(" ");
|
||||
}
|
||||
|
||||
// Advance to next bit.
|
||||
bit_position++;
|
||||
object_p++;
|
||||
}
|
||||
|
||||
// Print a newline after every odd numbered word, otherwise a space.
|
||||
if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
|
||||
PrintF("\n");
|
||||
} else {
|
||||
PrintF(" ");
|
||||
}
|
||||
|
||||
// Advance to next remembered set word.
|
||||
rset_address += kIntSize;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void PagedSpace::DoPrintRSet(const char* space_name) {
|
||||
PageIterator it(this, PageIterator::PAGES_IN_USE);
|
||||
while (it.has_next()) {
|
||||
Page* p = it.next();
|
||||
PrintF("%s page 0x%x:\n", space_name, p);
|
||||
PrintRSetRange(p->RSetStart(), p->RSetEnd(),
|
||||
reinterpret_cast<Object**>(p->ObjectAreaStart()),
|
||||
p->AllocationTop());
|
||||
PrintF("\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void OldSpace::PrintRSet() { DoPrintRSet("old"); }
|
||||
#endif
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
@ -2361,7 +2499,6 @@ void FixedSpace::MCCommitRelocationInfo() {
|
||||
if (it.has_next()) {
|
||||
accounting_stats_.WasteBytes(
|
||||
static_cast<int>(page->ObjectAreaEnd() - page_top));
|
||||
page->SetAllocationWatermark(page_top);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2391,14 +2528,7 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
|
||||
Object* result = free_list_.Allocate();
|
||||
if (!result->IsFailure()) {
|
||||
accounting_stats_.AllocateBytes(size_in_bytes);
|
||||
HeapObject* obj = HeapObject::cast(result);
|
||||
Page* p = Page::FromAddress(obj->address());
|
||||
|
||||
if (obj->address() >= p->AllocationWatermark()) {
|
||||
p->SetAllocationWatermark(obj->address() + size_in_bytes);
|
||||
}
|
||||
|
||||
return obj;
|
||||
return HeapObject::cast(result);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2428,8 +2558,6 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
|
||||
ASSERT(current_page->next_page()->is_valid());
|
||||
ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
|
||||
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
|
||||
current_page->next_page()->InvalidateWatermark(true);
|
||||
current_page->SetAllocationWatermark(allocation_info_.top);
|
||||
accounting_stats_.WasteBytes(page_extra_);
|
||||
SetAllocationInfo(&allocation_info_, current_page->next_page());
|
||||
return AllocateLinearly(&allocation_info_, size_in_bytes);
|
||||
@ -2442,12 +2570,51 @@ void FixedSpace::ReportStatistics() {
|
||||
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
|
||||
Capacity(), Waste(), Available(), pct);
|
||||
|
||||
// Report remembered set statistics.
|
||||
int rset_marked_pointers = 0;
|
||||
int cross_gen_pointers = 0;
|
||||
|
||||
PageIterator page_it(this, PageIterator::PAGES_IN_USE);
|
||||
while (page_it.has_next()) {
|
||||
Page* p = page_it.next();
|
||||
|
||||
for (Address rset_addr = p->RSetStart();
|
||||
rset_addr < p->RSetEnd();
|
||||
rset_addr += kIntSize) {
|
||||
int rset = Memory::int_at(rset_addr);
|
||||
if (rset != 0) {
|
||||
// Bits were set
|
||||
int intoff =
|
||||
static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
|
||||
int bitoff = 0;
|
||||
for (; bitoff < kBitsPerInt; ++bitoff) {
|
||||
if ((rset & (1 << bitoff)) != 0) {
|
||||
int bitpos = intoff*kBitsPerByte + bitoff;
|
||||
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
|
||||
Object** obj = reinterpret_cast<Object**>(slot);
|
||||
rset_marked_pointers++;
|
||||
if (Heap::InNewSpace(*obj))
|
||||
cross_gen_pointers++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pct = rset_marked_pointers == 0 ?
|
||||
0 : cross_gen_pointers * 100 / rset_marked_pointers;
|
||||
PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
|
||||
rset_marked_pointers, cross_gen_pointers, pct);
|
||||
|
||||
ClearHistograms();
|
||||
HeapObjectIterator obj_it(this);
|
||||
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
|
||||
CollectHistogramInfo(obj);
|
||||
ReportHistogram(false);
|
||||
}
|
||||
|
||||
|
||||
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
|
||||
#endif
|
||||
|
||||
|
||||
@ -2626,7 +2793,8 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
|
||||
chunk->set_size(chunk_size);
|
||||
first_chunk_ = chunk;
|
||||
|
||||
// Initialize page header.
|
||||
// Set the object address and size in the page header and clear its
|
||||
// remembered set.
|
||||
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
|
||||
Address object_address = page->ObjectAreaStart();
|
||||
// Clear the low order bit of the second word in the page to flag it as a
|
||||
@ -2634,7 +2802,13 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
|
||||
// low order bit should already be clear.
|
||||
ASSERT((chunk_size & 0x1) == 0);
|
||||
page->SetIsLargeObjectPage(true);
|
||||
page->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
page->ClearRSet();
|
||||
int extra_bytes = requested_size - object_size;
|
||||
if (extra_bytes > 0) {
|
||||
// The extra memory for the remembered set should be cleared.
|
||||
memset(object_address + object_size, 0, extra_bytes);
|
||||
}
|
||||
|
||||
return HeapObject::FromAddress(object_address);
|
||||
}
|
||||
|
||||
@ -2649,7 +2823,8 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
|
||||
|
||||
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
|
||||
ASSERT(0 < size_in_bytes);
|
||||
return AllocateRawInternal(size_in_bytes,
|
||||
int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
|
||||
return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
|
||||
size_in_bytes,
|
||||
NOT_EXECUTABLE);
|
||||
}
|
||||
@ -2676,61 +2851,59 @@ Object* LargeObjectSpace::FindObject(Address a) {
|
||||
return Failure::Exception();
|
||||
}
|
||||
|
||||
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
|
||||
|
||||
void LargeObjectSpace::ClearRSet() {
|
||||
ASSERT(Page::is_rset_in_use());
|
||||
|
||||
LargeObjectIterator it(this);
|
||||
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
|
||||
// We only have code, sequential strings, or fixed arrays in large
|
||||
// object space, and only fixed arrays need remembered set support.
|
||||
if (object->IsFixedArray()) {
|
||||
// Clear the normal remembered set region of the page;
|
||||
Page* page = Page::FromAddress(object->address());
|
||||
page->ClearRSet();
|
||||
|
||||
// Clear the extra remembered set.
|
||||
int size = object->Size();
|
||||
int extra_rset_bytes = ExtraRSetBytesFor(size);
|
||||
memset(object->address() + size, 0, extra_rset_bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
|
||||
ASSERT(Page::is_rset_in_use());
|
||||
|
||||
static void* lo_rset_histogram = StatsTable::CreateHistogram(
|
||||
"V8.RSetLO",
|
||||
0,
|
||||
// Keeping this histogram's buckets the same as the paged space histogram.
|
||||
Page::kObjectAreaSize / kPointerSize,
|
||||
30);
|
||||
|
||||
LargeObjectIterator it(this);
|
||||
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
|
||||
// We only have code, sequential strings, or fixed arrays in large
|
||||
// object space, and only fixed arrays can possibly contain pointers to
|
||||
// the young generation.
|
||||
if (object->IsFixedArray()) {
|
||||
// Iterate the normal page remembered set range.
|
||||
Page* page = Page::FromAddress(object->address());
|
||||
uint32_t marks = page->GetRegionMarks();
|
||||
uint32_t newmarks = Page::kAllRegionsCleanMarks;
|
||||
Address object_end = object->address() + object->Size();
|
||||
int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
|
||||
Min(page->ObjectAreaEnd(), object_end),
|
||||
page->RSetStart(),
|
||||
copy_object_func);
|
||||
|
||||
if (marks != Page::kAllRegionsCleanMarks) {
|
||||
// For a large page a single dirty mark corresponds to several
|
||||
// regions (modulo 32). So we treat a large page as a sequence of
|
||||
// normal pages of size Page::kPageSize having the same dirty marks
|
||||
// and subsequently iterate dirty regions on each of these pages.
|
||||
Address start = object->address();
|
||||
Address end = page->ObjectAreaEnd();
|
||||
Address object_end = start + object->Size();
|
||||
|
||||
// Iterate regions of the first normal page covering object.
|
||||
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
|
||||
newmarks |=
|
||||
Heap::IterateDirtyRegions(marks >> first_region_number,
|
||||
start,
|
||||
end,
|
||||
&Heap::IteratePointersInDirtyRegion,
|
||||
copy_object) << first_region_number;
|
||||
|
||||
start = end;
|
||||
end = start + Page::kPageSize;
|
||||
while (end <= object_end) {
|
||||
// Iterate next 32 regions.
|
||||
newmarks |=
|
||||
Heap::IterateDirtyRegions(marks,
|
||||
start,
|
||||
end,
|
||||
&Heap::IteratePointersInDirtyRegion,
|
||||
copy_object);
|
||||
start = end;
|
||||
end = start + Page::kPageSize;
|
||||
}
|
||||
|
||||
if (start != object_end) {
|
||||
// Iterate the last piece of an object which is less than
|
||||
// Page::kPageSize.
|
||||
newmarks |=
|
||||
Heap::IterateDirtyRegions(marks,
|
||||
start,
|
||||
object_end,
|
||||
&Heap::IteratePointersInDirtyRegion,
|
||||
copy_object);
|
||||
}
|
||||
|
||||
page->SetRegionMarks(newmarks);
|
||||
// Iterate the extra array elements.
|
||||
if (object_end > page->ObjectAreaEnd()) {
|
||||
count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
|
||||
object_end, copy_object_func);
|
||||
}
|
||||
if (lo_rset_histogram != NULL) {
|
||||
StatsTable::AddHistogramSample(lo_rset_histogram, count);
|
||||
}
|
||||
}
|
||||
}
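// --- Illustrative sketch (not part of the original commit) ---
// The slicing pattern used above for large pages, in isolation: the object is
// walked in 8K slices so the same 32 dirty marks can be reused for every
// slice (in the original code the first slice is shorter, running only to the
// page's object area end). Names are local to this sketch; VisitSlice stands
// in for the call to Heap::IterateDirtyRegions.
#include <stdint.h>

static void SketchIterateLargeObject(uintptr_t start, uintptr_t object_end,
                                     void (*VisitSlice)(uintptr_t, uintptr_t)) {
  const uintptr_t kSketchPageSize = 8192;
  uintptr_t slice_start = start;
  uintptr_t slice_end = slice_start + kSketchPageSize;
  while (slice_end <= object_end) {
    VisitSlice(slice_start, slice_end);   // covers the next 32 regions
    slice_start = slice_end;
    slice_end = slice_start + kSketchPageSize;
  }
  if (slice_start != object_end) {
    VisitSlice(slice_start, object_end);  // tail shorter than one page
  }
}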
|
||||
@ -2822,7 +2995,7 @@ void LargeObjectSpace::Verify() {
|
||||
} else if (object->IsFixedArray()) {
|
||||
// We loop over fixed arrays ourselves, rather than using the visitor,
|
||||
// because the visitor doesn't support the start/offset iteration
|
||||
// needed for IsRegionDirty.
|
||||
// needed for IsRSetSet.
|
||||
FixedArray* array = FixedArray::cast(object);
|
||||
for (int j = 0; j < array->length(); j++) {
|
||||
Object* element = array->get(j);
|
||||
@ -2831,11 +3004,8 @@ void LargeObjectSpace::Verify() {
|
||||
ASSERT(Heap::Contains(element_object));
|
||||
ASSERT(element_object->map()->IsMap());
|
||||
if (Heap::InNewSpace(element_object)) {
|
||||
Address array_addr = object->address();
|
||||
Address element_addr = array_addr + FixedArray::kHeaderSize +
|
||||
j * kPointerSize;
|
||||
|
||||
ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
|
||||
ASSERT(Page::IsRSetSet(object->address(),
|
||||
FixedArray::kHeaderSize + j * kPointerSize));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2876,6 +3046,33 @@ void LargeObjectSpace::CollectCodeStatistics() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LargeObjectSpace::PrintRSet() {
|
||||
LargeObjectIterator it(this);
|
||||
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
|
||||
if (object->IsFixedArray()) {
|
||||
Page* page = Page::FromAddress(object->address());
|
||||
|
||||
Address allocation_top = object->address() + object->Size();
|
||||
PrintF("large page 0x%x:\n", page);
|
||||
PrintRSetRange(page->RSetStart(), page->RSetEnd(),
|
||||
reinterpret_cast<Object**>(object->address()),
|
||||
allocation_top);
|
||||
int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
|
||||
int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
|
||||
kBitsPerInt);
|
||||
PrintF("------------------------------------------------------------"
|
||||
"-----------\n");
|
||||
PrintRSetRange(allocation_top,
|
||||
allocation_top + extra_rset_bits / kBitsPerByte,
|
||||
reinterpret_cast<Object**>(object->address()
|
||||
+ Page::kObjectAreaSize),
|
||||
allocation_top);
|
||||
PrintF("\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // DEBUG
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
280
src/spaces.h
@ -45,46 +45,23 @@ namespace internal {
|
||||
// The old generation is collected by a mark-sweep-compact collector.
|
||||
//
|
||||
// The semispaces of the young generation are contiguous. The old and map
|
||||
// spaces consists of a list of pages. A page has a page header and an object
|
||||
// area. A page size is deliberately chosen as 8K bytes.
|
||||
// The first word of a page is an opaque page header that has the
|
||||
// spaces consists of a list of pages. A page has a page header, a remembered
|
||||
// set area, and an object area. A page size is deliberately chosen as 8K
|
||||
// bytes. The first word of a page is an opaque page header that has the
|
||||
// address of the next page and its ownership information. The second word may
|
||||
// have the allocation top address of this page. Heap objects are aligned to the
|
||||
// pointer size.
|
||||
// have the allocation top address of this page. The next 248 bytes are
|
||||
// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
|
||||
// remembered set bit corresponds to a pointer in the object area.
|
||||
//
|
||||
// There is a separate large object space for objects larger than
|
||||
// Page::kMaxHeapObjectSize, so that they do not have to move during
|
||||
// collection. The large object space is paged. Pages in large object space
|
||||
// may be larger than 8K.
|
||||
//
|
||||
// A card marking write barrier is used to keep track of intergenerational
|
||||
// references. Old space pages are divided into regions of Page::kRegionSize
|
||||
// size. Each region has a corresponding dirty bit in the page header which is
|
||||
// set if the region might contain pointers to new space. For details about
|
||||
// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
|
||||
// method body.
|
||||
//
|
||||
// During scavenges and mark-sweep collections we iterate intergenerational
|
||||
// pointers without decoding heap object maps so if the page belongs to old
|
||||
// pointer space or large object space it is essential to guarantee that
|
||||
// the page does not contain any garbage pointers to new space: every pointer
|
||||
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
|
||||
// pointer to a live heap object in new space. Thus objects in old pointer
|
||||
// and large object spaces should have a special layout (e.g. no bare integer
|
||||
// fields). This requirement does not apply to map space which is iterated in
|
||||
// a special fashion. However we still require pointer fields of dead maps to
|
||||
// be cleaned.
|
||||
//
|
||||
// To enable lazy cleaning of old space pages we use a notion of allocation
|
||||
// watermark. Every pointer under watermark is considered to be well formed.
|
||||
// Page allocation watermark is not necessarily equal to page allocation top but
|
||||
// all alive objects on page should reside under allocation watermark.
|
||||
// During scavenge allocation watermark might be bumped and invalid pointers
|
||||
// might appear below it. To avoid following them we store a valid watermark
|
||||
// into special field in the page header and set a page WATERMARK_INVALIDATED
|
||||
// flag. For details see comments in the Page::SetAllocationWatermark() method
|
||||
// body.
|
||||
// collection. The large object space is paged and uses the same remembered
|
||||
// set implementation. Pages in large object space may be larger than 8K.
|
||||
//
|
||||
// NOTE: The mark-compact collector rebuilds the remembered set after a
|
||||
// collection. It reuses first a few words of the remembered set for
|
||||
// bookkeeping relocation information.
|
||||
|
||||
|
||||
// Some assertion macros used in the debugging mode.
|
||||
|
||||
@ -114,13 +91,25 @@ class AllocationInfo;
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// A page normally has 8K bytes. Large object pages may be larger. A page
|
||||
// address is always aligned to the 8K page size.
|
||||
// address is always aligned to the 8K page size. A page is divided into
|
||||
// three areas: the first two words are used for bookkeeping, the next 248
|
||||
// bytes are used as remembered set, and the rest of the page is the object
|
||||
// area.
|
||||
//
|
||||
// Each page starts with a header of Page::kPageHeaderSize size which contains
|
||||
// bookkeeping data.
|
||||
// Pointers are aligned to the pointer size (4), only 1 bit is needed
|
||||
// for a pointer in the remembered set. Given an address, its remembered set
|
||||
// bit position (offset from the start of the page) is calculated by dividing
|
||||
// its page offset by 32. Therefore, the object area in a page starts at the
|
||||
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
|
||||
// the first two words (64 bits) in a page can be used for other purposes.
|
||||
//
|
||||
// On the 64-bit platform, we add an offset to the start of the remembered set,
|
||||
// and pointers are aligned to 8-byte pointer size. This means that we need
|
||||
// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
|
||||
// For this reason we add an offset to get room for the Page data at the start.
|
||||
//
|
||||
// The mark-compact collector transforms a map pointer into a page index and a
|
||||
// page offset. The exact encoding is described in the comments for
|
||||
// page offset. The exact encoding is described in the comments for
|
||||
// class MapWord in objects.h.
|
||||
//
|
||||
// The only way to get a page pointer is by calling factory methods:
|
||||
@ -161,25 +150,18 @@ class Page {
|
||||
// Return the end of allocation in this page. Undefined for unused pages.
|
||||
inline Address AllocationTop();
|
||||
|
||||
// Return the allocation watermark for the page.
|
||||
// For old space pages it is guaranteed that the area under the watermark
|
||||
// does not contain any garbage pointers to new space.
|
||||
inline Address AllocationWatermark();
|
||||
|
||||
// Return the allocation watermark offset from the beginning of the page.
|
||||
inline uint32_t AllocationWatermarkOffset();
|
||||
|
||||
inline void SetAllocationWatermark(Address allocation_watermark);
|
||||
|
||||
inline void SetCachedAllocationWatermark(Address allocation_watermark);
|
||||
inline Address CachedAllocationWatermark();
|
||||
|
||||
// Returns the start address of the object area in this page.
|
||||
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
|
||||
|
||||
// Returns the end address (exclusive) of the object area in this page.
|
||||
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
|
||||
|
||||
// Returns the start address of the remembered set area.
|
||||
Address RSetStart() { return address() + kRSetStartOffset; }
|
||||
|
||||
// Returns the end address of the remembered set area (exclusive).
|
||||
Address RSetEnd() { return address() + kRSetEndOffset; }
|
||||
|
||||
// Checks whether an address is page aligned.
|
||||
static bool IsAlignedToPageSize(Address a) {
|
||||
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
|
||||
@ -211,22 +193,33 @@ class Page {
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Card marking support
|
||||
// Remembered set support
|
||||
|
||||
static const uint32_t kAllRegionsCleanMarks = 0x0;
|
||||
// Clears remembered set in this page.
|
||||
inline void ClearRSet();
|
||||
|
||||
inline uint32_t GetRegionMarks();
|
||||
inline void SetRegionMarks(uint32_t dirty);
|
||||
// Return the address of the remembered set word corresponding to an
|
||||
// object address/offset pair, and the bit encoded as a single-bit
|
||||
// mask in the output parameter 'bitmask'.
|
||||
INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
|
||||
uint32_t* bitmask));
|
||||
|
||||
inline uint32_t GetRegionMaskForAddress(Address addr);
|
||||
inline int GetRegionNumberForAddress(Address addr);
|
||||
// Sets the corresponding remembered set bit for a given address.
|
||||
INLINE(static void SetRSet(Address address, int offset));
|
||||
|
||||
inline void MarkRegionDirty(Address addr);
|
||||
inline bool IsRegionDirty(Address addr);
|
||||
// Clears the corresponding remembered set bit for a given address.
|
||||
static inline void UnsetRSet(Address address, int offset);
|
||||
|
||||
inline void ClearRegionMarks(Address start,
|
||||
Address end,
|
||||
bool reaches_limit);
|
||||
// Checks whether the remembered set bit for a given address is set.
|
||||
static inline bool IsRSetSet(Address address, int offset);
|
||||
|
||||
#ifdef DEBUG
|
||||
// Use a state to mark whether remembered set space can be used for other
|
||||
// purposes.
|
||||
enum RSetState { IN_USE, NOT_IN_USE };
|
||||
static bool is_rset_in_use() { return rset_state_ == IN_USE; }
|
||||
static void set_rset_state(RSetState state) { rset_state_ = state; }
|
||||
#endif
|
||||
|
||||
// Page size in bytes. This must be a multiple of the OS page size.
|
||||
static const int kPageSize = 1 << kPageSizeBits;
|
||||
@ -234,11 +227,25 @@ class Page {
|
||||
// Page size mask.
|
||||
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
|
||||
|
||||
static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
|
||||
kIntSize + kPointerSize;
|
||||
// The offset of the remembered set in a page, in addition to the empty bytes
|
||||
// formed as the remembered bits of the remembered set itself.
|
||||
#ifdef V8_TARGET_ARCH_X64
|
||||
static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
|
||||
#else
|
||||
static const int kRSetOffset = 0;
|
||||
#endif
|
||||
// The end offset of the remembered set in a page
|
||||
// (heaps are aligned to pointer size).
|
||||
static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
|
||||
|
||||
// The start offset of the object area in a page.
|
||||
static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
|
||||
// This needs to be at least (bits per uint32_t) * kBitsPerPointer,
|
||||
// to align start of rset to a uint32_t address.
|
||||
static const int kObjectStartOffset = 256;
|
||||
|
||||
// The start offset of the used part of the remembered set in a page.
|
||||
static const int kRSetStartOffset = kRSetOffset +
|
||||
kObjectStartOffset / kBitsPerPointer;
|
||||
|
||||
// Object area size in bytes.
|
||||
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
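// --- Illustrative sketch (not part of the original commit) ---
// Plugging the assumed 32-bit values (8K page, 4-byte pointers, no extra
// kRSetOffset) into the constants above gives the layout the comments
// describe: the remembered set occupies bytes 0..255, only bytes 8..255 of it
// are actually used, and the object area starts at byte 256.
static const int kSketchPageSize = 8192;
static const int kSketchBitsPerPointer = 32;      // bits in a 4-byte pointer
static const int kSketchRSetOffset = 0;
static const int kSketchObjectStartOffset = 256;

static const int kSketchRSetEndOffset =
    kSketchRSetOffset + kSketchPageSize / kSketchBitsPerPointer;           // 256
static const int kSketchRSetStartOffset =
    kSketchRSetOffset + kSketchObjectStartOffset / kSketchBitsPerPointer;  // 8
static const int kSketchObjectAreaSize =
    kSketchPageSize - kSketchObjectStartOffset;                            // 7936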
|
||||
@ -246,63 +253,13 @@ class Page {
|
||||
// Maximum object size that fits in a page.
|
||||
static const int kMaxHeapObjectSize = kObjectAreaSize;
|
||||
|
||||
static const int kDirtyFlagOffset = 2 * kPointerSize;
|
||||
static const int kRegionSizeLog2 = 8;
|
||||
static const int kRegionSize = 1 << kRegionSizeLog2;
|
||||
static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
|
||||
|
||||
STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
|
||||
|
||||
enum PageFlag {
|
||||
IS_NORMAL_PAGE = 1 << 0,
|
||||
WAS_IN_USE_BEFORE_MC = 1 << 1,
|
||||
|
||||
// Page allocation watermark was bumped by preallocation during scavenge.
|
||||
// Correct watermark can be retrieved by CachedAllocationWatermark() method
|
||||
WATERMARK_INVALIDATED = 1 << 2
|
||||
WAS_IN_USE_BEFORE_MC = 1 << 1
|
||||
};
|
||||
|
||||
// To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
|
||||
// scavenge we just invalidate the watermark on each old space page after
|
||||
// processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
|
||||
// flag at the beginning of the next scavenge and each page becomes marked as
|
||||
// having a valid watermark.
|
||||
//
|
||||
// The following invariant must hold for pages in old pointer and map spaces:
|
||||
// If page is in use then page is marked as having invalid watermark at
|
||||
// the beginning and at the end of any GC.
|
||||
//
|
||||
// This invariant guarantees that after flipping flag meaning at the
|
||||
// beginning of scavenge all pages in use will be marked as having valid
|
||||
// watermark.
|
||||
static inline void FlipMeaningOfInvalidatedWatermarkFlag();
|
||||
|
||||
// Returns true if the page allocation watermark was not altered during
|
||||
// scavenge.
|
||||
inline bool IsWatermarkValid();
|
||||
|
||||
inline void InvalidateWatermark(bool value);
|
||||
|
||||
inline bool GetPageFlag(PageFlag flag);
|
||||
inline void SetPageFlag(PageFlag flag, bool value);
|
||||
inline void ClearPageFlags();
|
||||
|
||||
static const int kAllocationWatermarkOffsetShift = 3;
|
||||
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
|
||||
static const uint32_t kAllocationWatermarkOffsetMask =
|
||||
((1 << kAllocationWatermarkOffsetBits) - 1) <<
|
||||
kAllocationWatermarkOffsetShift;
|
||||
|
||||
static const uint32_t kFlagsMask =
|
||||
((1 << kAllocationWatermarkOffsetShift) - 1);
|
||||
|
||||
STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
|
||||
kAllocationWatermarkOffsetBits);
|
||||
|
||||
// This field contains the meaning of the WATERMARK_INVALIDATED flag.
|
||||
// Instead of clearing this flag from all pages we just flip
|
||||
// its meaning at the beginning of a scavenge.
|
||||
static intptr_t watermark_invalidated_mark_;
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// Page header description.
|
||||
@ -322,24 +279,26 @@ class Page {
|
||||
// second word *may* (if the page start and large object chunk start are
|
||||
// the same) contain the large object chunk size. In either case, the
|
||||
// low-order bit for large object pages will be cleared.
|
||||
// For normal pages this word is used to store page flags and
|
||||
// offset of allocation top.
|
||||
intptr_t flags_;
|
||||
// For normal pages this word is used to store various page flags.
|
||||
int flags;
|
||||
|
||||
// This field contains dirty marks for regions covering the page. Only dirty
|
||||
// regions might contain intergenerational references.
|
||||
// Only 32 dirty marks are supported so for large object pages several regions
|
||||
// might be mapped to a single dirty mark.
|
||||
uint32_t dirty_regions_;
|
||||
// The following fields may overlap with remembered set, they can only
|
||||
// be used in the mark-compact collector when remembered set is not
|
||||
// used.
|
||||
|
||||
// The index of the page in its owner space.
|
||||
int mc_page_index;
|
||||
|
||||
// During mark-compact collections this field contains the forwarding address
|
||||
// of the first live object in this page.
|
||||
// During scavenge collection this field is used to store allocation watermark
|
||||
// if it is altered during scavenge.
|
||||
// The allocation pointer after relocating objects to this page.
|
||||
Address mc_relocation_top;
|
||||
|
||||
// The forwarding address of the first live object in this page.
|
||||
Address mc_first_forwarded;
|
||||
|
||||
#ifdef DEBUG
|
||||
private:
|
||||
static RSetState rset_state_; // state of the remembered set
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
@ -962,7 +921,8 @@ class PagedSpace : public Space {
|
||||
// Checks whether page is currently in use by this space.
|
||||
bool IsUsed(Page* page);
|
||||
|
||||
void MarkAllPagesClean();
|
||||
// Clears remembered sets of pages in this space.
|
||||
void ClearRSet();
|
||||
|
||||
// Prepares for a mark-compact GC.
|
||||
virtual void PrepareForMarkCompact(bool will_compact);
|
||||
@ -976,11 +936,6 @@ class PagedSpace : public Space {
|
||||
// The limit of allocation for a page in this space.
|
||||
virtual Address PageAllocationLimit(Page* page) = 0;
|
||||
|
||||
void FlushTopPageWatermark() {
|
||||
AllocationTopPage()->SetCachedAllocationWatermark(top());
|
||||
AllocationTopPage()->InvalidateWatermark(true);
|
||||
}
|
||||
|
||||
// Current capacity without growing (Size() + Available() + Waste()).
|
||||
int Capacity() { return accounting_stats_.Capacity(); }
|
||||
|
||||
@ -1035,8 +990,7 @@ class PagedSpace : public Space {
|
||||
|
||||
// Writes relocation info to the top page.
|
||||
void MCWriteRelocationInfoToPage() {
|
||||
TopPageOf(mc_forwarding_info_)->
|
||||
SetAllocationWatermark(mc_forwarding_info_.top);
|
||||
TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
|
||||
}
|
||||
|
||||
// Computes the offset of a given address in this space to the beginning
|
||||
@ -1154,6 +1108,8 @@ class PagedSpace : public Space {
|
||||
#ifdef DEBUG
|
||||
// Returns the number of total pages in this space.
|
||||
int CountTotalPages();
|
||||
|
||||
void DoPrintRSet(const char* space_name);
|
||||
#endif
|
||||
private:
|
||||
|
||||
@ -1790,10 +1746,6 @@ class OldSpace : public PagedSpace {
|
||||
if (add_to_freelist) {
|
||||
int wasted_bytes = free_list_.Free(start, size_in_bytes);
|
||||
accounting_stats_.WasteBytes(wasted_bytes);
|
||||
} else {
|
||||
#ifdef DEBUG
|
||||
MemoryAllocator::ZapBlock(start, size_in_bytes);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -1810,6 +1762,8 @@ class OldSpace : public PagedSpace {
|
||||
#ifdef DEBUG
|
||||
// Reports statistics for the space
|
||||
void ReportStatistics();
|
||||
// Dump the remembered sets in the space to stdout.
|
||||
void PrintRSet();
|
||||
#endif
|
||||
|
||||
protected:
|
||||
@ -1858,10 +1812,6 @@ class FixedSpace : public PagedSpace {
|
||||
void Free(Address start, bool add_to_freelist) {
|
||||
if (add_to_freelist) {
|
||||
free_list_.Free(start);
|
||||
} else {
|
||||
#ifdef DEBUG
|
||||
MemoryAllocator::ZapBlock(start, object_size_in_bytes_);
|
||||
#endif
|
||||
}
|
||||
accounting_stats_.DeallocateBytes(object_size_in_bytes_);
|
||||
}
|
||||
@ -1878,6 +1828,9 @@ class FixedSpace : public PagedSpace {
|
||||
#ifdef DEBUG
|
||||
// Reports statistic info of the space
|
||||
void ReportStatistics();
|
||||
|
||||
// Dump the remembered sets in the space to stdout.
|
||||
void PrintRSet();
|
||||
#endif
|
||||
|
||||
protected:
|
||||
@ -1946,11 +1899,11 @@ class MapSpace : public FixedSpace {
|
||||
PageIterator it(this, PageIterator::ALL_PAGES);
|
||||
while (pages_left-- > 0) {
|
||||
ASSERT(it.has_next());
|
||||
it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
it.next()->ClearRSet();
|
||||
}
|
||||
ASSERT(it.has_next());
|
||||
Page* top_page = it.next();
|
||||
top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
|
||||
top_page->ClearRSet();
|
||||
ASSERT(top_page->is_valid());
|
||||
|
||||
int offset = live_maps % kMapsPerPage * Map::kSize;
|
||||
@ -2041,8 +1994,9 @@ class LargeObjectChunk {
|
||||
public:
|
||||
// Allocates a new LargeObjectChunk that contains a large object page
|
||||
// (Page::kPageSize aligned) that has at least size_in_bytes (for a large
|
||||
// object) bytes after the object area start of that page.
|
||||
// The allocated chunk size is set in the output parameter chunk_size.
|
||||
// object and possibly extra remembered set words) bytes after the object
|
||||
// area start of that page. The allocated chunk size is set in the output
|
||||
// parameter chunk_size.
|
||||
static LargeObjectChunk* New(int size_in_bytes,
|
||||
size_t* chunk_size,
|
||||
Executability executable);
|
||||
@ -2065,12 +2019,16 @@ class LargeObjectChunk {
|
||||
// Returns the object in this chunk.
|
||||
inline HeapObject* GetObject();
|
||||
|
||||
// Given a requested size returns the physical size of a chunk to be
|
||||
// allocated.
|
||||
// Given a requested size (including any extra remembered set words),
|
||||
// returns the physical size of a chunk to be allocated.
|
||||
static int ChunkSizeFor(int size_in_bytes);
|
||||
|
||||
// Given a chunk size, returns the object size it can accommodate. Used by
|
||||
// LargeObjectSpace::Available.
|
||||
// Given a chunk size, returns the object size it can accommodate (not
|
||||
// including any extra remembered set words). Used by
|
||||
// LargeObjectSpace::Available. Note that this can overestimate the size
|
||||
// of object that will fit in a chunk---if the object requires extra
|
||||
// remembered set words (eg, for large fixed arrays), the actual object
|
||||
// size for the chunk will be smaller than reported by this function.
|
||||
static int ObjectSizeFor(int chunk_size) {
|
||||
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
|
||||
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
|
||||
@ -2106,7 +2064,8 @@ class LargeObjectSpace : public Space {
|
||||
// Allocates a large FixedArray.
|
||||
Object* AllocateRawFixedArray(int size_in_bytes);
|
||||
|
||||
// Available bytes for objects in this space.
|
||||
// Available bytes for objects in this space, not including any extra
|
||||
// remembered set words.
|
||||
int Available() {
|
||||
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
|
||||
}
|
||||
@ -2124,8 +2083,11 @@ class LargeObjectSpace : public Space {
|
||||
// space, may be slow.
|
||||
Object* FindObject(Address a);
|
||||
|
||||
// Iterates objects covered by dirty regions.
|
||||
void IterateDirtyRegions(ObjectSlotCallback func);
|
||||
// Clears remembered sets.
|
||||
void ClearRSet();
|
||||
|
||||
// Iterates objects whose remembered set bits are set.
|
||||
void IterateRSet(ObjectSlotCallback func);
|
||||
|
||||
// Frees unmarked objects.
|
||||
void FreeUnmarkedObjects();
|
||||
@ -2152,6 +2114,8 @@ class LargeObjectSpace : public Space {
|
||||
virtual void Print();
|
||||
void ReportStatistics();
|
||||
void CollectCodeStatistics();
|
||||
// Dump the remembered sets in the space to stdout.
|
||||
void PrintRSet();
|
||||
#endif
|
||||
// Checks whether an address is in the object area in this space. It
|
||||
// iterates all objects in the space. May be slow.
|
||||
@ -2170,6 +2134,10 @@ class LargeObjectSpace : public Space {
|
||||
int object_size,
|
||||
Executability executable);
|
||||
|
||||
// Returns the number of extra bytes (rounded up to the nearest full word)
|
||||
// required for extra_object_bytes of extra pointers (in bytes).
|
||||
static inline int ExtraRSetBytesFor(int extra_object_bytes);
|
||||
|
||||
friend class LargeObjectIterator;
|
||||
|
||||
public:
|
||||
|
@ -308,8 +308,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
|
||||
// (tail-call) to the code in register edx without checking arguments.
|
||||
__ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ movsxlq(rbx,
|
||||
FieldOperand(rdx,
|
||||
SharedFunctionInfo::kFormalParameterCountOffset));
|
||||
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
|
||||
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
|
||||
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
|
||||
__ cmpq(rax, rbx);
|
||||
@ -526,15 +525,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
|
||||
__ lea(scratch1, Operand(result, JSArray::kSize));
|
||||
__ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
|
||||
|
||||
// Initialize the FixedArray and fill it with holes. FixedArray length is
|
||||
// Initialize the FixedArray and fill it with holes. FixedArray length is not
|
||||
// stored as a smi.
|
||||
// result: JSObject
|
||||
// scratch1: elements array
|
||||
// scratch2: start of next object
|
||||
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
|
||||
__ Move(FieldOperand(scratch1, JSObject::kMapOffset),
|
||||
Factory::fixed_array_map());
|
||||
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
|
||||
Smi::FromInt(initial_capacity));
|
||||
__ movq(FieldOperand(scratch1, Array::kLengthOffset),
|
||||
Immediate(initial_capacity));
|
||||
|
||||
// Fill the FixedArray with the hole value. Inline the code if short.
|
||||
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
|
||||
@ -588,6 +587,7 @@ static void AllocateJSArray(MacroAssembler* masm,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
|
||||
// Check whether an empty sized array is requested.
|
||||
__ SmiToInteger64(array_size, array_size);
|
||||
__ testq(array_size, array_size);
|
||||
__ j(not_zero, ¬_empty);
|
||||
|
||||
@ -605,11 +605,10 @@ static void AllocateJSArray(MacroAssembler* masm,
|
||||
// Allocate the JSArray object together with space for a FixedArray with the
|
||||
// requested elements.
|
||||
__ bind(¬_empty);
|
||||
SmiIndex index =
|
||||
masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
|
||||
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
|
||||
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
|
||||
index.scale,
|
||||
index.reg,
|
||||
times_pointer_size,
|
||||
array_size,
|
||||
result,
|
||||
elements_array_end,
|
||||
scratch,
|
||||
@ -621,41 +620,43 @@ static void AllocateJSArray(MacroAssembler* masm,
|
||||
// result: JSObject
|
||||
// elements_array: initial map
|
||||
// elements_array_end: start of next object
|
||||
// array_size: size of array (smi)
|
||||
// array_size: size of array
|
||||
__ bind(&allocated);
|
||||
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
|
||||
__ Move(elements_array, Factory::empty_fixed_array());
|
||||
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
|
||||
// Field JSArray::kElementsOffset is initialized later.
|
||||
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
|
||||
__ Integer32ToSmi(scratch, array_size);
|
||||
__ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
|
||||
|
||||
// Calculate the location of the elements array and set elements array member
|
||||
// of the JSArray.
|
||||
// result: JSObject
|
||||
// elements_array_end: start of next object
|
||||
// array_size: size of array (smi)
|
||||
// array_size: size of array
|
||||
__ lea(elements_array, Operand(result, JSArray::kSize));
|
||||
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
|
||||
|
||||
// Initialize the fixed array. FixedArray length is stored as a smi.
|
||||
// Initialize the fixed array. FixedArray length is not stored as a smi.
|
||||
// result: JSObject
|
||||
// elements_array: elements array
|
||||
// elements_array_end: start of next object
|
||||
// array_size: size of array (smi)
|
||||
// array_size: size of array
|
||||
ASSERT(kSmiTag == 0);
|
||||
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
|
||||
Factory::fixed_array_map());
|
||||
Label not_empty_2, fill_array;
|
||||
__ SmiTest(array_size);
|
||||
__ testq(array_size, array_size);
|
||||
__ j(not_zero, ¬_empty_2);
|
||||
// Length of the FixedArray is the number of pre-allocated elements even
|
||||
// though the actual JSArray has length 0.
|
||||
__ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
|
||||
Smi::FromInt(kPreallocatedArrayElements));
|
||||
__ movq(FieldOperand(elements_array, Array::kLengthOffset),
|
||||
Immediate(kPreallocatedArrayElements));
|
||||
__ jmp(&fill_array);
|
||||
__ bind(¬_empty_2);
|
||||
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
|
||||
// same.
|
||||
__ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
|
||||
__ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
|
||||
|
||||
// Fill the allocated FixedArray with the hole value if requested.
|
||||
// result: JSObject
|
||||
@ -1038,9 +1039,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
__ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
__ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
__ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length

// Initialize the fields to undefined.
// rbx: JSObject

@ -1858,7 +1858,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {

frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
entry.Jump();
@ -1869,7 +1870,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2

// Push the length of the array and the initial index onto the stack.
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0

@ -3840,13 +3842,11 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
__ movzxbq(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
destination()->false_target()->Branch(below);
__ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
__ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(less);
__ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
obj.Unuse();
destination()->Split(below_equal);
destination()->Split(less_equal);
}


@ -4338,7 +4338,7 @@ void CodeGenerator::GenerateRandomHeapNumber(
__ PrepareCallCFunction(0);
__ CallCFunction(ExternalReference::random_uint32_function(), 0);

// Convert 32 random bits in rax to 0.(32 random bits) in a double
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
__ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
@ -4433,8 +4433,7 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ Move(FieldOperand(rcx, HeapObject::kMapOffset),
Factory::fixed_array_map());
// Set length.
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
__ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
// Fill contents of fixed-array with the-hole.
__ Move(rdx, Factory::the_hole_value());
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
@ -4553,15 +4552,15 @@ void DeferredSearchCache::Generate() {
// cache miss this optimization would hardly matter much.

// Check if we could add new entry to cache.
__ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
__ SmiCompare(rbx, r9);
__ SmiToInteger32(r9, r9);
__ cmpq(rbx, r9);
__ j(greater, &add_new_entry);

// Check if we could evict entry after finger.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(rdx, rdx);
__ SmiToInteger32(rbx, rbx);
__ addq(rdx, kEntrySizeImm);
Label forward;
__ cmpq(rbx, rdx);
@ -4573,8 +4572,9 @@ void DeferredSearchCache::Generate() {
__ jmp(&update_cache);

__ bind(&add_new_entry);
// r9 holds cache size as smi.
__ SmiToInteger32(rdx, r9);
// r9 holds cache size as int.
__ movq(rdx, r9);
__ Integer32ToSmi(r9, r9);
__ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
__ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);

@ -7132,8 +7132,13 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());

// Use a fresh temporary for the index and later the loaded
// value.
Result index = allocator()->Allocate();
ASSERT(index.is_valid());

DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
new DeferredReferenceGetKeyedValue(index.reg(),
receiver.reg(),
key.reg(),
is_global);
@ -7169,21 +7174,31 @@ Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Factory::fixed_array_map());
deferred->Branch(not_equal);

// Check that key is within bounds.
__ SmiCompare(key.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
// Shift the key to get the actual index value and check that
// it is within bounds.
__ SmiToInteger32(index.reg(), key.reg());
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);

// The key register holds the smi-tagged key. Load the value and
// check that it is not the hole value.
Result value = elements;
SmiIndex index =
masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
// The index register holds the un-smi-tagged key. It has been
// zero-extended to 64-bits, so it can be used directly as index in the
// operand below.
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
// TODO(206): Consider whether it makes sense to try some
// heuristic about which register to reuse. For example, if
// one is rax, the we can reuse that one because the value
// coming from the deferred code will be in rax.
Result value = index;
__ movq(value.reg(),
FieldOperand(elements.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize));
Operand(elements.reg(),
index.reg(),
times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
__ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
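In the restored EmitKeyedLoad fast path above, the key is untagged into a separate index register and then used directly to address the element. The address arithmetic behind that Operand(...) expression, with illustrative constants (the real values live in the V8 headers; the helper below is not V8 code):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag        = 1;  // tagged heap pointers have the low bit set (assumed)
    constexpr intptr_t kPointerSize          = 8;
    constexpr intptr_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map word + length word (assumed)

    // Address of element 'index' of a FixedArray given its tagged pointer,
    // i.e. what Operand(elements, index, times_pointer_size,
    //               FixedArray::kHeaderSize - kHeapObjectTag) computes.
    inline intptr_t ElementAddress(intptr_t tagged_elements, intptr_t index) {
      return tagged_elements - kHeapObjectTag + kFixedArrayHeaderSize +
             index * kPointerSize;
    }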
@ -7482,7 +7497,7 @@ void Reference::SetValue(InitState init_state) {

// Check whether it is possible to omit the write barrier. If the
// elements array is in new space or the value written is a smi we can
// safely update the elements array without write barrier.
// safely update the elements array without updating the remembered set.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@ -7507,10 +7522,10 @@ void Reference::SetValue(InitState init_state) {
// Store the value.
SmiIndex index =
masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
__ movq(FieldOperand(tmp.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize),
__ movq(Operand(tmp.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);

@ -7592,7 +7607,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
__ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));

// Setup the fixed slots.
__ xor_(rbx, rbx); // Set to NULL.
@ -8267,8 +8282,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
__ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ SmiToInteger32(rax, rax);
__ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
@ -8546,10 +8560,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,

// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
// Divide smi tagged length by two.
__ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
__ subq(mask, Immediate(1)); // Make mask.
__ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
__ subl(mask, Immediate(1)); // Make mask.

// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
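Either way the length is read in the hunk above, the hash mask for the number-to-string cache comes out the same: the cache stores (number, string) pairs, so the entry count is length / 2 and the mask is that count minus one (the masking implies a power-of-two capacity). A hypothetical helper spelling out that arithmetic (not V8 code):

    #include <cstdint>

    inline uint32_t NumberStringCacheEntry(uint32_t cache_length, uint32_t hash) {
      uint32_t mask = (cache_length >> 1) - 1;  // divide length by two, make mask
      return hash & mask;                       // index of the probed cache entry
    }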
@ -9069,6 +9082,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {

// Get the parameters pointer from the stack and untag the length.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
__ SmiToInteger32(rcx, rcx);

// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@ -9076,8 +9090,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
__ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
__ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

// Copy the fixed array slots.
Label loop;
@ -10848,7 +10861,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&allocated);
// Fill the fields of the cons string.
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
__ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);

File diff suppressed because it is too large
@ -165,11 +165,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
// Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// r1 - used to hold the capacity mask of the dictionary
//
@ -245,7 +245,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
__ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}


@ -351,7 +351,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;

@ -377,23 +377,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);

// Save key in rbx in case we want it for the number dictionary
// case.
__ movq(rbx, rax);
__ SmiToInteger32(rax, rax);
// Get the elements array of the object.
__ bind(&index_smi);
__ bind(&index_int);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ movq(rax, FieldOperand(rcx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
__ movq(rax, Operand(rcx, rax, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@ -402,13 +402,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);

// Check whether the elements is a pixel array.
// rax: key
// rax: untagged index
// rcx: elements array
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
__ SmiToInteger32(rax, rax);
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@ -418,13 +417,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// rax: key
// rax: untagged index
// rbx: key
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
__ SmiToInteger32(rbx, rax);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, rdx, rdi);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
__ ret(0);

// Slow case: Load name and receiver from stack and jump to runtime.
@ -513,11 +512,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// We want the smi-tagged index in rax.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
__ Integer32ToSmi(rax, rbx);
__ jmp(&index_smi);
__ movl(rax, rbx);
__ and_(rax, Immediate(String::kArrayIndexHashMask));
__ shrl(rax, Immediate(String::kHashShift));
__ jmp(&index_int);
}


@ -858,7 +856,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
__ SmiCompare(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
// Untag the key (for checking against untagged length in the fixed array).
__ SmiToInteger32(rdx, rbx);
__ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
// rax: value
// rcx: FixedArray
// rbx: index (as a smi)
@ -907,11 +907,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rbx: index (as a smi)
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
__ SmiCompare(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ SmiToInteger64(rbx, rbx);
__ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Increment index to get new length.
__ SmiAddConstant(rdi, rbx, Smi::FromInt(1));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
// Increment and restore smi-tag.
__ Integer64PlusConstantToSmi(rbx, rbx, 1);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
__ SmiSubConstant(rbx, rbx, Smi::FromInt(1));
__ jmp(&fast);

// Array case: Get the length and the elements array from the JS
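The grow-by-one path above either adds Smi::FromInt(1) to the tagged length or adds 1 to the untagged value and re-tags it. Under the same assumed 64-bit smi layout as in the earlier sketch, the two are equivalent (illustrative helpers, not V8 code):

    #include <cstdint>

    constexpr int kSmiShift = 32;  // assumed layout, as above

    // Tagged path: adding Smi::FromInt(1) is adding (1 << kSmiShift) to the word.
    inline int64_t SmiAddOne(int64_t smi) {
      return smi + (int64_t{1} << kSmiShift);
    }

    // Untagged path: add one to the plain integer, then re-tag
    // (roughly what Integer64PlusConstantToSmi amounts to).
    inline int64_t IntegerPlusOneToSmi(int64_t value) {
      return (value + 1) << kSmiShift;
    }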
@ -939,14 +941,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ movq(FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize),
__ movq(Operand(rcx, index.reg, index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
rax);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rbx for use by RecordWrite.
// Update write barrier for the elements array address.
SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rbx, kPointerSizeLog2);
__ movq(FieldOperand(rcx, index2.reg, index2.scale, FixedArray::kHeaderSize),
__ movq(Operand(rcx, index2.reg, index2.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rcx, 0, rdx, rbx);

@ -90,21 +90,58 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}

Label fast;

// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
and_(object, Immediate(~Page::kPageAlignmentMask));
ASSERT(is_int32(~Page::kPageAlignmentMask));
and_(object,
Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
Register page_start = object;

// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
and_(addr, Immediate(Page::kPageAlignmentMask));
shrl(addr, Immediate(Page::kRegionSizeLog2));
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
subq(addr, page_start);
shr(addr, Immediate(kPointerSizeLog2));
Register pointer_offset = addr;

// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
j(below, &fast);

// We have a large object containing pointers. It must be a FixedArray.

// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.

// Load the array length into 'scratch'.
movl(scratch,
Operand(page_start,
Page::kObjectStartOffset + FixedArray::kLengthOffset));
Register array_length = scratch;

// Extra remembered set starts right after the large object (a FixedArray), at
// page_start + kObjectStartOffset + objectSize
// where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
- Page::kRSetEndOffset));

// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bind(&fast);
bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}

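RecordWriteHelper, as restored above, boils the store location down to a page start and a word index within that page, then sets the corresponding remembered-set bit (with a slower path for the extra remembered set of large FixedArrays). A rough sketch of that address arithmetic, with an assumed page size (the real geometry is defined by Page):

    #include <cstdint>

    constexpr uintptr_t kPageSize          = 1u << 13;  // assumption for this sketch
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
    constexpr int       kPointerSizeLog2   = 3;

    // Page start from any pointer into the page.
    inline uintptr_t PageStart(uintptr_t object) {
      return object & ~kPageAlignmentMask;
    }

    // Word index of the written slot within its page; this is the bit index
    // that bts sets in the remembered set.
    inline uintptr_t PointerOffset(uintptr_t addr, uintptr_t page_start) {
      return (addr - page_start) >> kPointerSizeLog2;
    }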
// For page containing |object| mark region covering [object+offset] dirty.
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the smi_index register contains the array index into
// the elements array represented as a smi. Otherwise it can be used as a
@ -119,8 +156,9 @@ void MacroAssembler::RecordWrite(Register object,
// registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));

// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
Label done;
JumpIfSmi(value, &done);

@ -153,8 +191,8 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
bind(&okay);
}

// Test that the object address is not in the new space. We cannot
// update page dirty marks for new space pages.
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
InNewSpace(object, scratch, equal, &done);

// The offset is relative to a tagged or untagged HeapObject pointer,
@ -163,19 +201,48 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));

Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
// We use optimized write barrier code if the word being written to is not in
// a large object page, or is in the first "page" of a large object page.
// We make sure that an offset is inside the right limits whether it is
// tagged or untagged.
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
// Compute the bit offset in the remembered set, leave it in 'scratch'.
lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
shr(scratch, Immediate(kPointerSizeLog2));

// Compute the page address from the heap object pointer, leave it in
// 'object' (immediate value is sign extended).
and_(object, Immediate(~Page::kPageAlignmentMask));

// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
bts(Operand(object, Page::kRSetOffset), scratch);
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
index.reg,
index.scale,
FixedArray::kHeaderSize));
Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
index.reg,
index.scale,
FixedArray::kHeaderSize));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
RecordWriteHelper(object, dst, scratch);
} else {
RecordWriteStub stub(object, dst, scratch);
CallStub(&stub);
}
}
RecordWriteHelper(object, dst, scratch);

bind(&done);

@ -577,18 +644,6 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power) {
ASSERT((0 <= power) && (power < 32));
if (dst.is(src)) {
shr(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
}


Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
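PositiveSmiDivPowerOfTwoToInteger32, restored in the hunk above, relies on the payload of a non-negative smi sitting above kSmiShift: dividing the payload by 2^power and untagging collapse into a single logical shift. A sketch under the same assumed layout (not V8 code):

    #include <cstdint>

    constexpr int kSmiShift = 32;  // assumed layout, as in the sketches above

    inline uint32_t PositiveSmiDivPowerOfTwo(uint64_t smi_bits, int power) {
      // (payload << kSmiShift) >> (power + kSmiShift) == payload >> power
      return static_cast<uint32_t>(smi_bits >> (power + kSmiShift));
    }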
@ -747,7 +802,7 @@ void MacroAssembler::SmiSub(Register dst,

void MacroAssembler::SmiSub(Register dst,
Register src1,
const Operand& src2,
Operand const& src2,
Label* on_not_smi_result) {
if (on_not_smi_result == NULL) {
// No overflow checking. Use only when it's known that
@ -865,15 +920,6 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
}


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
ASSERT(!dst.is(kScratchRegister));
if (constant->value() != 0) {
Move(kScratchRegister, constant);
addq(dst, kScratchRegister);
}
}


void MacroAssembler::SmiAddConstant(Register dst,
Register src,
Smi* constant,
@ -2553,7 +2599,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
movq(FieldOperand(result, String::kHashFieldOffset),
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}

@ -2591,7 +2637,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
movq(FieldOperand(result, String::kHashFieldOffset),
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}


@ -78,8 +78,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support

// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// Set the remebered set bit for an address which points into an
// object. RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@ -93,7 +93,7 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* branch);

// For page containing |object| mark region covering [object+offset] dirty.
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@ -103,7 +103,7 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);

// For page containing |object| mark region covering [object+offset] dirty.
// Set the remembered set bit for [object+offset].
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@ -220,13 +220,6 @@ class MacroAssembler: public Assembler {
Register src,
int power);

// Divide a positive smi's integer value by a power of two.
// Provides result as 32-bit integer value.
void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power);


// Simple comparison of smis.
void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);
@ -313,10 +306,6 @@ class MacroAssembler: public Assembler {
// No overflow testing on the result is done.
void SmiAddConstant(Register dst, Register src, Smi* constant);

// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
void SmiAddConstant(const Operand& dst, Smi* constant);

// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
void SmiAddConstant(Register dst,
@ -360,7 +349,7 @@ class MacroAssembler: public Assembler {

void SmiSub(Register dst,
Register src1,
const Operand& src2,
Operand const& src2,
Label* on_not_smi_result);

// Multiplies smi values and return the result as a smi,

@ -1115,7 +1115,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);

if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;

// Get the array's length into rax and calculate new length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@ -1123,7 +1123,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ SmiAddConstant(rax, rax, Smi::FromInt(argc));

// Get the element's length into rcx.
__ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rcx, rcx);

// Check if we could survive without allocation.
__ SmiCompare(rax, rcx);
@ -1142,12 +1143,12 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(Operand(rdx, 0), rcx);

// Check if value is a smi.
__ JumpIfNotSmi(rcx, &with_write_barrier);
__ JumpIfNotSmi(rcx, &with_rset_update);

__ bind(&exit);
__ ret((argc + 1) * kPointerSize);

__ bind(&with_write_barrier);
__ bind(&with_rset_update);

__ InNewSpace(rbx, rcx, equal, &exit);

@ -1195,11 +1196,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));

// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
__ addl(FieldOperand(rbx, FixedArray::kLengthOffset),
Immediate(kAllocationDelta));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);

// Elements are in new space, so write barrier is not required.
// Elements are in new space, so no remembered set updates are necessary.
__ ret((argc + 1) * kPointerSize);

__ bind(&call_builtin);

@ -177,7 +177,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
@ -666,14 +666,14 @@ TEST(JSArray) {
array->SetElementsLength(*length);

uint32_t int_length = 0;
CHECK(length->ToArrayIndex(&int_length));
CHECK(Array::IndexFromObject(*length, &int_length));
CHECK_EQ(*length, array->length());
CHECK(array->HasDictionaryElements()); // Must be in slow mode.

// array[length] = name.
array->SetElement(int_length, *name);
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK(Array::IndexFromObject(array->length(), &new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
CHECK_EQ(array->GetElement(int_length), *name);
CHECK_EQ(array->GetElement(0), *name);
@ -830,7 +830,7 @@ TEST(LargeObjectSpaceContains) {
}
CHECK(bytes_to_page > FixedArray::kHeaderSize);

intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
int* flags_ptr = &Page::FromAddress(next_page)->flags;
Address flags_addr = reinterpret_cast<Address>(flags_ptr);

int bytes_to_allocate =
@ -888,7 +888,7 @@ TEST(Regression39128) {

// The plan: create JSObject which references objects in new space.
// Then clone this object (forcing it to go into old space) and check
// that region dirty marks are updated correctly.
// that only bits pertaining to the object are updated in remembered set.

// Step 1: prepare a map for the object. We add 1 inobject property to it.
Handle<JSFunction> object_ctor(Top::global_context()->object_function());
@ -931,7 +931,7 @@ TEST(Regression39128) {
CHECK(!object->IsFailure());
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
CHECK_EQ(0, jsobject->elements()->length());
CHECK_EQ(0, jsobject->properties()->length());
// Create a reference to object in new space in jsobject.
jsobject->FastPropertyAtPut(-1, array);
@ -951,9 +951,17 @@ TEST(Regression39128) {
}
CHECK(Heap::old_pointer_space()->Contains(clone->address()));

// Step 5: verify validity of region dirty marks.
// Step 5: verify validity of remembered set.
Address clone_addr = clone->address();
Page* page = Page::FromAddress(clone_addr);
// Check that region covering inobject property 1 is marked dirty.
CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
// Check that remembered set tracks a reference from inobject property 1.
CHECK(page->IsRSetSet(clone_addr, object_size - kPointerSize));
// Probe several addresses after the object.
for (int i = 0; i < 7; i++) {
int offset = object_size + i * kPointerSize;
if (clone_addr + offset >= page->ObjectAreaEnd()) {
break;
}
CHECK(!page->IsRSetSet(clone_addr, offset));
}
}

@ -273,7 +273,7 @@ static void CreateTraceCallerFunction(const char* func_name,
// StackTracer uses Top::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
#ifdef V8_HOST_ARCH_IA32
// TODO(711) The hack of replacing the inline runtime function
// RandomHeapNumber with GetFrameNumber does not work with the way the full
// compiler generates inline runtime calls.
@ -315,7 +315,7 @@ TEST(CFromJSStackTrace) {
// Top::c_entry_fp value. In this case, StackTracer uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
#ifdef V8_HOST_ARCH_IA32
// TODO(711) The hack of replacing the inline runtime function
// RandomHeapNumber with GetFrameNumber does not work with the way the full
// compiler generates inline runtime calls.

@ -32,32 +32,40 @@

using namespace v8::internal;

static void VerifyRegionMarking(Address page_start) {
static void VerifyRSet(Address page_start) {
#ifdef DEBUG
Page::set_rset_state(Page::IN_USE);
#endif

Page* p = Page::FromAddress(page_start);

p->SetRegionMarks(Page::kAllRegionsCleanMarks);
p->ClearRSet();

for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
CHECK(!Page::IsRSetSet(addr, 0));
}

for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
Page::FromAddress(addr)->MarkRegionDirty(addr);
Page::SetRSet(addr, 0);
}

for (Address addr = p->ObjectAreaStart();
addr < p->ObjectAreaEnd();
addr += kPointerSize) {
CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
CHECK(Page::IsRSetSet(addr, 0));
}
}


TEST(Page) {
#ifdef DEBUG
Page::set_rset_state(Page::NOT_IN_USE);
#endif

byte* mem = NewArray<byte>(2*Page::kPageSize);
CHECK(mem != NULL);

@ -82,8 +90,8 @@ TEST(Page) {
CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

// test region marking
VerifyRegionMarking(page_start);
// test remember set
VerifyRSet(page_start);

DeleteArray(mem);
}