Remove the atomic/thread-safe stuff from the chunk table, since we don't
use cross-thread synchronization in Crankshaft.

Review URL: http://codereview.chromium.org/5979001
git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6075 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Parent: b2dbea6c60
Commit: e24362b50f
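The change below is mechanical: every AtomicWord slot in the chunk table becomes a plain uintptr_t, and every Release_Store / NoBarrier_Store on a slot becomes an ordinary assignment, since the table is only read and written from a single thread. A minimal sketch of the before/after shape (the names here are hypothetical, not V8's):

#include <stdint.h>

// Sketch only: the pattern this commit removes and introduces, not V8 code.
static const int kSlots = 16;
static uintptr_t table[kSlots];  // was: static AtomicWord table[kSlots];

void AddEntry(int index, uintptr_t chunk_start) {
  // was: Release_Store(&table[index], static_cast<AtomicWord>(chunk_start));
  table[index] = chunk_start;  // plain store; no concurrent readers to publish to
}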
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -364,15 +364,15 @@ void MemoryAllocator::TearDown() {
 }


-void MemoryAllocator::FreeChunkTables(AtomicWord* array, int len, int level) {
+void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
   for (int i = 0; i < len; i++) {
     if (array[i] != kUnusedChunkTableEntry) {
-      AtomicWord* subarray = reinterpret_cast<AtomicWord*>(array[i]);
+      uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
       if (level > 1) {
-        Release_Store(&array[i], kUnusedChunkTableEntry);
+        array[i] = kUnusedChunkTableEntry;
         FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
       } else {
-        Release_Store(&array[i], kUnusedChunkTableEntry);
+        array[i] = kUnusedChunkTableEntry;
       }
       delete[] subarray;
     }
@@ -822,7 +822,7 @@ void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {

 void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
                                            uintptr_t chunk_index_base) {
-  AtomicWord* fine_grained = AllocatedChunksFinder(
+  uintptr_t* fine_grained = AllocatedChunksFinder(
       chunk_table_,
       chunk_index_base,
       kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
@@ -830,7 +830,7 @@ void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
   int index = FineGrainedIndexForAddress(chunk_index_base);
   if (fine_grained[index] != kUnusedChunkTableEntry) index++;
   ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
-  Release_Store(&fine_grained[index], chunk_start);
+  fine_grained[index] = chunk_start;
 }


@@ -845,7 +845,7 @@ void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
 void MemoryAllocator::RemoveChunkFoundUsingAddress(
     uintptr_t chunk_start,
     uintptr_t chunk_index_base) {
-  AtomicWord* fine_grained = AllocatedChunksFinder(
+  uintptr_t* fine_grained = AllocatedChunksFinder(
       chunk_table_,
       chunk_index_base,
       kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
@@ -854,22 +854,23 @@ void MemoryAllocator::RemoveChunkFoundUsingAddress(
   ASSERT(fine_grained != kUnusedChunkTableEntry);
   int index = FineGrainedIndexForAddress(chunk_index_base);
   ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
-  if (fine_grained[index] != static_cast<AtomicWord>(chunk_start)) {
+  if (fine_grained[index] != chunk_start) {
     index++;
-    ASSERT(fine_grained[index] == static_cast<AtomicWord>(chunk_start));
-    Release_Store(&fine_grained[index], kUnusedChunkTableEntry);
+    ASSERT(fine_grained[index] == chunk_start);
+    fine_grained[index] = kUnusedChunkTableEntry;
   } else {
-    Release_Store(&fine_grained[index], fine_grained[index + 1]);
-    // Here for a moment the two entries are duplicates, but the reader can
-    // handle that.
-    NoBarrier_Store(&fine_grained[index + 1], kUnusedChunkTableEntry);
+    // If only one of the entries is used it must be the first, since
+    // InAllocatedChunks relies on that. Move things around so that this is
+    // the case.
+    fine_grained[index] = fine_grained[index + 1];
+    fine_grained[index + 1] = kUnusedChunkTableEntry;
   }
 }


 bool MemoryAllocator::InAllocatedChunks(Address addr) {
   uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
-  AtomicWord* fine_grained = AllocatedChunksFinder(
+  uintptr_t* fine_grained = AllocatedChunksFinder(
       chunk_table_,
       int_address,
       kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
@@ -877,21 +878,18 @@ bool MemoryAllocator::InAllocatedChunks(Address addr) {
   if (fine_grained == NULL) return false;
   int index = FineGrainedIndexForAddress(int_address);
   if (fine_grained[index] == kUnusedChunkTableEntry) return false;
-  uintptr_t entry = static_cast<uintptr_t>(fine_grained[index]);
+  uintptr_t entry = fine_grained[index];
   if (entry <= int_address && entry + kChunkSize > int_address) return true;
   index++;
   if (fine_grained[index] == kUnusedChunkTableEntry) return false;
-  entry = static_cast<uintptr_t>(fine_grained[index]);
-  // At this point it would seem that we must have a hit, but there is a small
-  // window during RemoveChunkFoundUsingAddress where the two entries are
-  // duplicates and we have to handle that.
+  entry = fine_grained[index];
   if (entry <= int_address && entry + kChunkSize > int_address) return true;
   return false;
 }


-AtomicWord* MemoryAllocator::AllocatedChunksFinder(
-    AtomicWord* table,
+uintptr_t* MemoryAllocator::AllocatedChunksFinder(
+    uintptr_t* table,
     uintptr_t address,
     int bit_position,
     CreateTables create_as_needed) {
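The rewritten else-branch in RemoveChunkFoundUsingAddress preserves the invariant described by the new comment: InAllocatedChunks (above) gives up as soon as the first of the two fine-grained slots is empty, so a lone remaining chunk must end up in slot 0. A hedged sketch of that compaction over a two-slot pair (helper names are hypothetical):

#include <stdint.h>
#include <assert.h>

static const uintptr_t kUnused = 0;  // stands in for kUnusedChunkTableEntry

// Remove 'start' from a two-slot fine-grained pair, shifting any surviving
// chunk into slot 0 so a reader that stops at an empty slot 0 still sees it.
void RemoveFromPair(uintptr_t pair[2], uintptr_t start) {
  if (pair[0] != start) {
    assert(pair[1] == start);
    pair[1] = kUnused;   // slot 0 keeps its chunk; nothing to shift
  } else {
    pair[0] = pair[1];   // may itself be kUnused if slot 1 was empty
    pair[1] = kUnused;
  }
}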
@@ -906,8 +904,8 @@ AtomicWord* MemoryAllocator::AllocatedChunksFinder(
       address & ((V8_INTPTR_C(1) << bit_position) - 1);
   ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
          (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
-  AtomicWord* more_fine_grained_table =
-      reinterpret_cast<AtomicWord*>(table[index]);
+  uintptr_t* more_fine_grained_table =
+      reinterpret_cast<uintptr_t*>(table[index]);
   if (more_fine_grained_table == kUnusedChunkTableEntry) {
     if (create_as_needed == kDontCreateTables) return NULL;
     int words_needed = 1 << kChunkTableBitsPerLevel;
@@ -915,12 +913,11 @@ AtomicWord* MemoryAllocator::AllocatedChunksFinder(
       words_needed =
           (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
     }
-    more_fine_grained_table = new AtomicWord[words_needed];
+    more_fine_grained_table = new uintptr_t[words_needed];
     for (int i = 0; i < words_needed; i++) {
       more_fine_grained_table[i] = kUnusedChunkTableEntry;
     }
-    Release_Store(&table[index],
-                  reinterpret_cast<AtomicWord>(more_fine_grained_table));
+    table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
   }
   return AllocatedChunksFinder(
       more_fine_grained_table,
@@ -930,7 +927,7 @@ AtomicWord* MemoryAllocator::AllocatedChunksFinder(
 }


-AtomicWord MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];
+uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];


 // -----------------------------------------------------------------------------
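AllocatedChunksFinder, shown in the hunks above, walks a small radix tree over the address: each level indexes with the kChunkTableBitsPerLevel bits just above bit_position and recurses with bit_position reduced by one level until only the in-chunk offset remains. A rough single-threaded sketch of the lookup path, under assumed constants (the real values, the wider top level, and the table-creation branch are elided):

#include <stdint.h>
#include <stddef.h>

// Assumed constants for illustration only.
static const int kChunkSizeLog2 = 20;          // 1 MB chunks (assumption)
static const int kChunkTableBitsPerLevel = 4;  // 16-way fan-out (assumption)

// Descend one table per call, mirroring the recursion in
// AllocatedChunksFinder minus table creation and asserts.
uintptr_t* FindLeaf(uintptr_t* table, uintptr_t address, int bit_position) {
  if (bit_position == kChunkSizeLog2) return table;  // fine-grained level reached
  int index = static_cast<int>(
      (address >> bit_position) & ((1 << kChunkTableBitsPerLevel) - 1));
  uintptr_t* next = reinterpret_cast<uintptr_t*>(table[index]);
  if (next == NULL) return NULL;  // kUnusedChunkTableEntry: nothing mapped here
  return FindLeaf(next, address, bit_position - kChunkTableBitsPerLevel);
}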
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -28,7 +28,6 @@
 #ifndef V8_SPACES_H_
 #define V8_SPACES_H_

-#include "atomicops.h"
 #include "list-inl.h"
 #include "log.h"

@@ -688,7 +687,7 @@ class MemoryAllocator : public AllStatic {
   // The chunks are not chunk-size aligned so for a given chunk-sized area of
   // memory there can be two chunks that cover it.
   static const int kChunkTableFineGrainedWordsPerEntry = 2;
-  static const AtomicWord kUnusedChunkTableEntry = 0;
+  static const uintptr_t kUnusedChunkTableEntry = 0;

   // Maximum space size in bytes.
   static intptr_t capacity_;
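The two-words-per-entry constant above exists because chunks are chunk-sized but not chunk-aligned, so one chunk-sized stretch of the address space can be covered by parts of two different chunks. A worked example with an assumed chunk size of 0x100000 (1 MB):

// Assumed kChunkSize = 0x100000 for illustration.
//   chunk A: [0x180000, 0x280000)    chunk B: [0x280000, 0x380000)
// The chunk-sized region [0x200000, 0x300000) overlaps both A and B, so its
// fine-grained entry needs two words, e.g. { 0x180000, 0x280000 }.
//   InAllocatedChunks(0x250000): slot 0 hits, since 0x180000 <= 0x250000 < 0x280000.
//   InAllocatedChunks(0x2f0000): slot 0 misses, slot 1 hits (chunk start 0x280000).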
@@ -696,7 +695,7 @@ class MemoryAllocator : public AllStatic {
   static intptr_t capacity_executable_;

   // Top level table to track whether memory is part of a chunk or not.
-  static AtomicWord chunk_table_[kChunkTableTopLevelEntries];
+  static uintptr_t chunk_table_[kChunkTableTopLevelEntries];

   // Allocated space size in bytes.
   static intptr_t size_;
@@ -766,11 +765,11 @@ class MemoryAllocator : public AllStatic {
   // Controls whether the lookup creates intermediate levels of tables as
   // needed.
   enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
-  static AtomicWord* AllocatedChunksFinder(AtomicWord* table,
+  static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
                                            uintptr_t address,
                                            int bit_position,
                                            CreateTables create_as_needed);
-  static void FreeChunkTables(AtomicWord* array, int length, int level);
+  static void FreeChunkTables(uintptr_t* array, int length, int level);
   static int FineGrainedIndexForAddress(uintptr_t address) {
     int index = ((address >> kChunkSizeLog2) &
                  ((1 << kChunkTableBitsPerLevel) - 1));