Move atomic-utils.h into base/

atomic-utils.h only uses functionality from base/. Moving it into base/ also
lets code outside of v8_base benefit from it.

BUG=v8:4991
LOG=n

Review-Url: https://codereview.chromium.org/1954603002
Cr-Commit-Position: refs/heads/master@{#36114}
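
For reference, a minimal sketch (not part of this commit) of what a consumer in
v8::internal looks like after the move: the include path becomes
src/base/atomic-utils.h and the helpers are referenced through the base::
namespace. The WorkTracker class and the Increment()/Value() calls below are
illustrative assumptions, not code taken from this change.

// Hypothetical consumer, for illustration only; assumes AtomicNumber
// exposes Increment() and Value() as in the existing header.
#include "src/base/atomic-utils.h"

namespace v8 {
namespace internal {

class WorkTracker {
 public:
  // Bump the counter from any thread.
  void OnTaskFinished() { finished_tasks_.Increment(1); }
  // Read the current count.
  intptr_t finished_tasks() { return finished_tasks_.Value(); }

 private:
  // After this commit the type must be qualified with base::.
  base::AtomicNumber<intptr_t> finished_tasks_;
};

}  // namespace internal
}  // namespace v8
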
Author: lpy, 2016-05-09 09:22:09 -07:00 (committed by Commit bot)
parent 6f419dfe67
commit 57a73e81c6
12 changed files with 37 additions and 37 deletions

View File

@@ -774,7 +774,6 @@ source_set("v8_base") {
     "src/ast/scopes.h",
     "src/ast/variables.cc",
     "src/ast/variables.h",
-    "src/atomic-utils.h",
     "src/background-parsing-task.cc",
     "src/background-parsing-task.h",
     "src/bailout-reason.cc",
@@ -1848,6 +1847,7 @@ source_set("v8_libbase") {
     "src/base/accounting-allocator.cc",
     "src/base/accounting-allocator.h",
     "src/base/adapters.h",
+    "src/base/atomic-utils.h",
     "src/base/atomicops.h",
     "src/base/atomicops_internals_arm64_gcc.h",
     "src/base/atomicops_internals_arm_gcc.h",

View File

@@ -11,7 +11,7 @@
 #include "src/base/macros.h"
 namespace v8 {
-namespace internal {
+namespace base {
 template <class T>
 class AtomicNumber {
@@ -169,7 +169,7 @@ class AtomicEnumSet {
   base::AtomicWord bits_;
 };
-}  // namespace internal
+}  // namespace base
 }  // namespace v8
 #endif  // #define V8_ATOMIC_UTILS_H_

View File

@@ -6,7 +6,7 @@
 #define V8_CANCELABLE_TASK_H_
 #include "include/v8-platform.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/macros.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/hashmap.h"
@@ -104,13 +104,13 @@ class Cancelable {
   }
   CancelableTaskManager* parent_;
-  AtomicValue<Status> status_;
+  base::AtomicValue<Status> status_;
   uint32_t id_;
   // The counter is incremented for failing tries to cancel a task. This can be
   // used by the task itself as an indication how often external entities tried
   // to abort it.
-  AtomicNumber<intptr_t> cancel_counter_;
+  base::AtomicNumber<intptr_t> cancel_counter_;
   friend class CancelableTaskManager;

View File

@@ -13,7 +13,7 @@
 #include "include/v8.h"
 #include "src/allocation.h"
 #include "src/assert-scope.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/globals.h"
 #include "src/heap-symbols.h"
 // TODO(mstarzinger): Two more includes to kill!
@@ -2003,11 +2003,11 @@ class Heap {
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
-  AtomicNumber<size_t> always_allocate_scope_count_;
+  base::AtomicNumber<size_t> always_allocate_scope_count_;
   // Stores the memory pressure level that set by MemoryPressureNotification
   // and reset by a mark-compact garbage collection.
-  AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
   // For keeping track of context disposals.
   int contexts_disposed_;

View File

@@ -121,7 +121,7 @@ class PageParallelJob {
     Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
         : chunk(chunk), state(kAvailable), data(data), next(next) {}
     MemoryChunk* chunk;
-    AtomicValue<ProcessingState> state;
+    base::AtomicValue<ProcessingState> state;
     typename JobTraits::PerPageData data;
     Item* next;
   };

View File

@@ -8,7 +8,7 @@
 #include <list>
 #include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
@@ -586,7 +586,7 @@ class MemoryChunk {
     return addr >= area_start() && addr <= area_end();
   }
-  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
     return concurrent_sweeping_;
   }
@@ -800,20 +800,20 @@ class MemoryChunk {
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  AtomicValue<intptr_t> high_water_mark_;
+  base::AtomicValue<intptr_t> high_water_mark_;
   base::Mutex* mutex_;
-  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
+  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
   // PagedSpace free-list statistics.
-  AtomicNumber<intptr_t> available_in_free_list_;
-  AtomicNumber<intptr_t> wasted_memory_;
+  base::AtomicNumber<intptr_t> available_in_free_list_;
+  base::AtomicNumber<intptr_t> wasted_memory_;
   // next_chunk_ holds a pointer of type MemoryChunk
-  AtomicValue<MemoryChunk*> next_chunk_;
+  base::AtomicValue<MemoryChunk*> next_chunk_;
   // prev_chunk_ holds a pointer of type MemoryChunk
-  AtomicValue<MemoryChunk*> prev_chunk_;
+  base::AtomicValue<MemoryChunk*> prev_chunk_;
   FreeListCategory categories_[kNumberOfCategories];
@@ -1504,17 +1504,17 @@ class MemoryAllocator {
   intptr_t capacity_executable_;
   // Allocated space size in bytes.
-  AtomicNumber<intptr_t> size_;
+  base::AtomicNumber<intptr_t> size_;
   // Allocated executable space size in bytes.
-  AtomicNumber<intptr_t> size_executable_;
+  base::AtomicNumber<intptr_t> size_executable_;
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
   // conservative, i.e. not all addrsses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  AtomicValue<void*> lowest_ever_allocated_;
-  AtomicValue<void*> highest_ever_allocated_;
+  base::AtomicValue<void*> lowest_ever_allocated_;
+  base::AtomicValue<void*> highest_ever_allocated_;
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1978,7 +1978,7 @@ class FreeList {
   FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
   PagedSpace* owner_;
-  AtomicNumber<intptr_t> wasted_bytes_;
+  base::AtomicNumber<intptr_t> wasted_bytes_;
   FreeListCategory* categories_[kNumberOfCategories];
   friend class FreeListCategory;

View File

@@ -5,7 +5,7 @@
 #ifndef V8_LOCKED_QUEUE_INL_
 #define V8_LOCKED_QUEUE_INL_
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/locked-queue.h"
 namespace v8 {
@@ -15,7 +15,7 @@ template <typename Record>
 struct LockedQueue<Record>::Node : Malloced {
   Node() : next(nullptr) {}
   Record value;
-  AtomicValue<Node*> next;
+  base::AtomicValue<Node*> next;
 };

View File

@@ -6,7 +6,7 @@
 #define V8_PROFILER_CPU_PROFILER_H_
 #include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/platform/time.h"
 #include "src/compiler.h"
@@ -176,7 +176,7 @@ class ProfilerEventsProcessor : public base::Thread {
   SamplingCircularQueue<TickSampleEventRecord,
                         kTickSampleQueueLength> ticks_buffer_;
   LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
-  AtomicNumber<unsigned> last_code_event_id_;
+  base::AtomicNumber<unsigned> last_code_event_id_;
   unsigned last_processed_code_event_id_;
 };

View File

@@ -42,7 +42,7 @@
 #endif
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/platform/platform.h"
 #include "src/flags.h"
 #include "src/frames-inl.h"
@@ -242,7 +242,7 @@ typedef List<Sampler*> SamplerList;
 #if defined(USE_SIGNALS)
 class AtomicGuard {
  public:
-  explicit AtomicGuard(AtomicValue<int>* atomic, bool is_block = true)
+  explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true)
      : atomic_(atomic),
        is_success_(false) {
    do {
@@ -262,7 +262,7 @@ class AtomicGuard {
   }
  private:
-  AtomicValue<int>* atomic_;
+  base::AtomicValue<int>* atomic_;
   bool is_success_;
 };
@@ -747,7 +747,7 @@ class SamplerThread : public base::Thread {
   friend class SignalHandler;
   static base::LazyInstance<HashMap, HashMapCreateTrait>::type
       thread_id_to_samplers_;
-  static AtomicValue<int> sampler_list_access_counter_;
+  static base::AtomicValue<int> sampler_list_access_counter_;
   static void AddSampler(Sampler* sampler) {
     AtomicGuard atomic_guard(&sampler_list_access_counter_);
     // Add sampler into map if needed.
@@ -779,7 +779,7 @@ SamplerThread* SamplerThread::instance_ = NULL;
 #if defined(USE_SIGNALS)
 base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
     SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
-AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
+base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
 // As Native Client does not support signal handling, profiling is disabled.
 #if !V8_OS_NACL

View File

@@ -444,7 +444,6 @@
         'ast/scopes.h',
         'ast/variables.cc',
         'ast/variables.h',
-        'atomic-utils.h',
         'background-parsing-task.cc',
         'background-parsing-task.h',
         'bailout-reason.cc',
@@ -1632,6 +1631,7 @@
         'base/accounting-allocator.cc',
         'base/accounting-allocator.h',
         'base/adapters.h',
+        'base/atomic-utils.h',
         'base/atomicops.h',
         'base/atomicops_internals_arm64_gcc.h',
         'base/atomicops_internals_arm_gcc.h',

View File

@@ -4,11 +4,11 @@
 #include <limits.h>
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "testing/gtest/include/gtest/gtest.h"
 namespace v8 {
-namespace internal {
+namespace base {
 TEST(AtomicNumber, Constructor) {
   // Test some common types.
@@ -213,5 +213,5 @@ TEST(AtomicEnumSet, Equality) {
   EXPECT_FALSE(a != b);
 }
-}  // namespace internal
+}  // namespace base
 }  // namespace v8

View File

@@ -23,7 +23,7 @@
         '../..',
       ],
       'sources': [  ### gcmole(all) ###
-        'atomic-utils-unittest.cc',
+        'base/atomic-utils-unittest.cc',
         'base/bits-unittest.cc',
         'base/cpu-unittest.cc',
         'base/division-by-constant-unittest.cc',