Isolates cleanup: get rid of some ugly macros.

Review URL: http://codereview.chromium.org/7062003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7993 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
vitalyr@chromium.org 2011-05-23 12:59:02 +00:00
parent dc69639e4c
commit 69bc282fd6
6 changed files with 35 additions and 49 deletions

@@ -396,7 +396,7 @@ class StringTracker {
 // data. In multithreaded V8 programs this data is copied in and out of storage
 // so that the currently executing thread always has its own copy of this
 // data.
-ISOLATED_CLASS HandleScopeImplementer {
+class HandleScopeImplementer {
  public:
   explicit HandleScopeImplementer(Isolate* isolate)

@@ -116,7 +116,7 @@ class Bootstrapper {
   bool IsActive() const { return nesting_ != 0; }
   // Support for thread preemption.
-  RLYSTC int ArchiveSpacePerThread();
+  static int ArchiveSpacePerThread();
   char* ArchiveState(char* to);
   char* RestoreState(char* from);
   void FreeThreadResources();
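
For context on the preemption support declared above: ArchiveSpacePerThread() reports the per-thread byte budget, and ArchiveState()/RestoreState() copy state through a caller-provided buffer, returning the advanced cursor. A minimal stand-alone sketch of that protocol, with a hypothetical ThreadLocalComponent in place of Bootstrapper's real implementation:

    #include <cstring>

    // Hypothetical component following the same archive/restore protocol.
    class ThreadLocalComponent {
     public:
      static int ArchiveSpacePerThread() { return static_cast<int>(sizeof(int)); }
      char* ArchiveState(char* to) {    // copy state out, return advanced cursor
        std::memcpy(to, &nesting_, sizeof(nesting_));
        return to + ArchiveSpacePerThread();
      }
      char* RestoreState(char* from) {  // copy state back in
        std::memcpy(&nesting_, from, sizeof(nesting_));
        return from + ArchiveSpacePerThread();
      }
     private:
      int nesting_ = 0;
    };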

@@ -1371,20 +1371,6 @@ inline void Context::mark_out_of_memory() {
 }
-// Temporary macro to be used to flag definitions that are indeed static
-// and not per-isolate. (It would be great to be able to grep for [static]!)
-#define RLYSTC static
-// Temporary macro to be used to flag classes that should be static.
-#define STATIC_CLASS class
-// Temporary macro to be used to flag classes that are completely converted
-// to be isolate-friendly. Their mix of static/nonstatic methods/fields is
-// correct.
-#define ISOLATED_CLASS class
 } }  // namespace v8::internal
 // TODO(isolates): Get rid of these -inl.h includes and place them only where
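
The three macros deleted here were grep-able placeholders from the isolates migration that all expanded to the plain keywords, which is why removing them throughout the tree is a pure textual substitution. A compilable illustration (the Example class is hypothetical):

    #define RLYSTC static         // definition truly static, not per-isolate
    #define STATIC_CLASS class    // class that should become all-static
    #define ISOLATED_CLASS class  // class already converted to be isolate-friendly

    ISOLATED_CLASS Example {      // preprocesses to: class Example {
     public:
      RLYSTC int Count();         // preprocesses to: static int Count();
    };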

@@ -148,7 +148,7 @@ class SnapshotByteSource {
 // This only works for objects in the first page of a space. Don't use this for
 // things in newspace since it bypasses the write barrier.
-RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
+static const int k64 = (sizeof(uintptr_t) - 4) / 4;
 #define COMMON_REFERENCE_PATTERNS(f) \
   f(kNumberOfSpaces, 2, (11 - k64)) \
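
The k64 idiom above folds pointer width into a 0/1 constant: (8 - 4) / 4 == 1 on a 64-bit target and (4 - 4) / 4 == 0 on a 32-bit one, so a pattern budget like (11 - k64) gives large-pointer builds one value fewer. A compile-time check of that arithmetic:

    #include <cstdint>

    static const int k64 = (sizeof(uintptr_t) - 4) / 4;
    static_assert(k64 == (sizeof(uintptr_t) == 8 ? 1 : 0),
                  "k64 is 1 on 64-bit targets, 0 on 32-bit ones");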
@@ -181,8 +181,8 @@ RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
 // both.
 class SerializerDeserializer: public ObjectVisitor {
  public:
-  RLYSTC void Iterate(ObjectVisitor* visitor);
-  RLYSTC void SetSnapshotCacheSize(int size);
+  static void Iterate(ObjectVisitor* visitor);
+  static void SetSnapshotCacheSize(int size);
  protected:
   // Where the pointed-to object can be found:
@@ -220,34 +220,34 @@ class SerializerDeserializer: public ObjectVisitor {
   // Misc.
   // Raw data to be copied from the snapshot.
-  RLYSTC const int kRawData = 0x30;
+  static const int kRawData = 0x30;
   // Some common raw lengths: 0x31-0x3f
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
   // Examine the build process for architecture, version or configuration
   // mismatches.
-  RLYSTC const int kSynchronize = 0x70;
+  static const int kSynchronize = 0x70;
   // Used for the source code of the natives, which is in the executable, but
   // is referred to from external strings in the snapshot.
-  RLYSTC const int kNativesStringResource = 0x71;
-  RLYSTC const int kNewPage = 0x72;
+  static const int kNativesStringResource = 0x71;
+  static const int kNewPage = 0x72;
   // 0x73-0x7f Free.
   // 0xb0-0xbf Free.
   // 0xf0-0xff Free.
-  RLYSTC const int kLargeData = LAST_SPACE;
-  RLYSTC const int kLargeCode = kLargeData + 1;
-  RLYSTC const int kLargeFixedArray = kLargeCode + 1;
-  RLYSTC const int kNumberOfSpaces = kLargeFixedArray + 1;
-  RLYSTC const int kAnyOldSpace = -1;
+  static const int kLargeData = LAST_SPACE;
+  static const int kLargeCode = kLargeData + 1;
+  static const int kLargeFixedArray = kLargeCode + 1;
+  static const int kNumberOfSpaces = kLargeFixedArray + 1;
+  static const int kAnyOldSpace = -1;
   // A bitmask for getting the space out of an instruction.
-  RLYSTC const int kSpaceMask = 15;
+  static const int kSpaceMask = 15;
-  RLYSTC inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
-  RLYSTC inline bool SpaceIsPaged(int space) {
+  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+  static inline bool SpaceIsPaged(int space) {
     return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
   }
 };
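
For readers of the constants above: the low four bits of a serializer instruction byte carry the allocation space, and space values at or beyond LAST_SPACE are large-object pseudo-spaces. A self-contained sketch with a stand-in LAST_SPACE value (the real one comes from V8's AllocationSpace enum):

    const int LAST_SPACE = 8;  // assumption for illustration only
    static const int kLargeData = LAST_SPACE;
    static const int kLargeCode = kLargeData + 1;        // == 9
    static const int kLargeFixedArray = kLargeCode + 1;  // == 10
    static const int kSpaceMask = 15;                    // low 4 bits of the byte

    static bool SpaceIsLarge(int space) { return space >= kLargeData; }
    static int SpaceOf(int instruction) { return instruction & kSpaceMask; }
    // e.g. an instruction byte 0x39 carries space 9 == kLargeCode in its low
    // bits, telling the deserializer to make a large-object code allocation.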
@@ -380,19 +380,19 @@ class SerializationAddressMapper {
   }
  private:
-  RLYSTC bool SerializationMatchFun(void* key1, void* key2) {
+  static bool SerializationMatchFun(void* key1, void* key2) {
     return key1 == key2;
   }
-  RLYSTC uint32_t Hash(HeapObject* obj) {
+  static uint32_t Hash(HeapObject* obj) {
     return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
   }
-  RLYSTC void* Key(HeapObject* obj) {
+  static void* Key(HeapObject* obj) {
     return reinterpret_cast<void*>(obj->address());
   }
-  RLYSTC void* Value(int v) {
+  static void* Value(int v) {
     return reinterpret_cast<void*>(v);
   }
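
These four statics adapt a void*-keyed hash map: objects are matched by pointer identity, hashed by truncating their address to 32 bits, and the mapped serialization id is packed directly into the void* value slot instead of being stored elsewhere. A stand-alone sketch of that int-in-pointer packing (PackValue/UnpackValue are hypothetical names mirroring Value()):

    #include <cstdint>

    // Pack a small int into a void* value slot and back; this avoids allocating
    // storage for the mapped id, at the cost of the casts shown here.
    static void* PackValue(int v) {
      return reinterpret_cast<void*>(static_cast<intptr_t>(v));
    }
    static int UnpackValue(void* p) {
      return static_cast<int>(reinterpret_cast<intptr_t>(p));
    }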
@@ -403,7 +403,7 @@ class SerializationAddressMapper {
 // There can be only one serializer per V8 process.
-STATIC_CLASS Serializer : public SerializerDeserializer {
+class Serializer : public SerializerDeserializer {
  public:
   explicit Serializer(SnapshotByteSink* sink);
   ~Serializer();
@@ -415,25 +415,25 @@ STATIC_CLASS Serializer : public SerializerDeserializer {
     return fullness_[space];
   }
-  RLYSTC void Enable() {
+  static void Enable() {
     if (!serialization_enabled_) {
       ASSERT(!too_late_to_enable_now_);
     }
     serialization_enabled_ = true;
   }
-  RLYSTC void Disable() { serialization_enabled_ = false; }
+  static void Disable() { serialization_enabled_ = false; }
   // Call this when you have made use of the fact that there is no serialization
   // going on.
-  RLYSTC void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
-  RLYSTC bool enabled() { return serialization_enabled_; }
+  static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
+  static bool enabled() { return serialization_enabled_; }
   SerializationAddressMapper* address_mapper() { return &address_mapper_; }
 #ifdef DEBUG
   virtual void Synchronize(const char* tag);
 #endif
  protected:
-  RLYSTC const int kInvalidRootIndex = -1;
+  static const int kInvalidRootIndex = -1;
   virtual int RootIndex(HeapObject* heap_object) = 0;
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
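
The Enable()/TooLateToEnableNow() pair above forms a one-way ratchet: code that exploits the fact that no serialization is running first records that fact, and any later attempt to enable serialization trips the assert instead of producing a silently inconsistent snapshot. A stand-alone sketch of the protocol (hypothetical SerializationFlags class, plain assert in place of V8's ASSERT):

    #include <cassert>

    class SerializationFlags {
     public:
      static void Enable() {
        if (!enabled_) {
          assert(!too_late_);  // someone already relied on serialization being off
        }
        enabled_ = true;
      }
      static void Disable() { enabled_ = false; }
      static void TooLateToEnableNow() { too_late_ = true; }
     private:
      static bool enabled_;
      static bool too_late_;
    };

    bool SerializationFlags::enabled_ = false;
    bool SerializationFlags::too_late_ = false;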
@@ -488,11 +488,11 @@ STATIC_CLASS Serializer : public SerializerDeserializer {
   // object space it may return kLargeCode or kLargeFixedArray in order
   // to indicate to the deserializer what kind of large object allocation
   // to make.
-  RLYSTC int SpaceOfObject(HeapObject* object);
+  static int SpaceOfObject(HeapObject* object);
   // This just returns the space of the object. It will return LO_SPACE
   // for all large objects since you can't check the type of the object
   // once the map has been used for the serialization address.
-  RLYSTC int SpaceOfAlreadySerializedObject(HeapObject* object);
+  static int SpaceOfAlreadySerializedObject(HeapObject* object);
   int Allocate(int space, int size, bool* new_page_started);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_->Encode(addr);
@@ -506,9 +506,9 @@ STATIC_CLASS Serializer : public SerializerDeserializer {
   SnapshotByteSink* sink_;
   int current_root_index_;
   ExternalReferenceEncoder* external_reference_encoder_;
-  RLYSTC bool serialization_enabled_;
+  static bool serialization_enabled_;
   // Did we already make use of the fact that serialization was not enabled?
-  RLYSTC bool too_late_to_enable_now_;
+  static bool too_late_to_enable_now_;
   int large_object_total_;
   SerializationAddressMapper address_mapper_;

@@ -33,7 +33,7 @@
 namespace v8 {
 namespace internal {
-STATIC_CLASS Snapshot {
+class Snapshot {
  public:
   // Initialize the VM from the given snapshot file. If snapshot_file is
   // NULL, use the internal snapshot instead. Returns false if no snapshot

@@ -334,7 +334,7 @@ class StubCache {
   Entry secondary_[kSecondaryTableSize];
   // Computes the hashed offsets for primary and secondary caches.
-  RLYSTC int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+  static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
     // This works well because the heap object tag size and the hash
     // shift are equal. Shifting down the length field to get the
     // hash code would effectively throw away two bits of the hash
@@ -357,7 +357,7 @@ class StubCache {
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
-  RLYSTC int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+  static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t string_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
@@ -374,7 +374,7 @@ class StubCache {
   // ends in String::kHashShift 0s. Then we shift it so it is a multiple
   // of sizeof(Entry). This makes it easier to avoid making mistakes
   // in the hashed offset computations.
-  RLYSTC Entry* entry(Entry* table, int offset) {
+  static Entry* entry(Entry* table, int offset) {
     const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
     return reinterpret_cast<Entry*>(
         reinterpret_cast<Address>(table) + (offset << shift_amount));
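
A worked instance of the shift in entry() above, under assumed values chosen purely for illustration (String::kHashShift == 2, 32-bit pointers, a two-pointer Entry; none of these constants are taken from the diff):

    #include <cassert>

    int main() {
      const int kPointerSizeLog2 = 2;  // assumption: 32-bit build
      const int kHashShift = 2;        // assumption: String::kHashShift
      const int kEntrySize = 1 << (kPointerSizeLog2 + 1);  // two pointers = 8 bytes
      const int shift_amount = kPointerSizeLog2 + 1 - kHashShift;  // == 1

      int offset = 20;  // 0b10100: ends in kHashShift zero bits, per the comment
      assert((offset << shift_amount) % kEntrySize == 0);  // 40 % 8 == 0
      return 0;  // the shifted offset indexes whole Entry slots in the table
    }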