Move atomic ops and related files to base library

BUG=none
R=jkummerow@chromium.org
LOG=n

Review URL: https://codereview.chromium.org/316133002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21693 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: jochen@chromium.org   Date: 2014-06-05 12:14:47 +00:00
parent 9f6294010a
commit 799fc835f8
59 changed files with 296 additions and 261 deletions
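
For in-tree callers, the visible effect of this change is a new include path and namespace: the atomic primitives, lazy-instance, and once machinery now live in src/base/ under v8::base rather than in src/ under v8::internal. A minimal sketch of post-move usage (the counter and function are illustrative placeholders, not code from this change):

#include "src/base/atomicops.h"

static v8::base::Atomic32 request_count = 0;  // placeholder counter

int CountRequest() {
  // Plain atomic increment; returns the incremented value.
  return v8::base::NoBarrier_AtomicIncrement(&request_count, 1);
}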

View File

@ -367,8 +367,6 @@ source_set("v8_base") {
"src/assert-scope.cc",
"src/ast.cc",
"src/ast.h",
"src/atomicops.h",
"src/atomicops_internals_x86_gcc.cc",
"src/bignum-dtoa.cc",
"src/bignum-dtoa.h",
"src/bignum.cc",
@ -550,7 +548,6 @@ source_set("v8_base") {
"src/jsregexp-inl.h",
"src/jsregexp.cc",
"src/jsregexp.h",
"src/lazy-instance.h",
# TODO(jochen): move libplatform/ files to their own target.
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
@ -588,8 +585,6 @@ source_set("v8_base") {
"src/objects-visiting.h",
"src/objects.cc",
"src/objects.h",
"src/once.cc",
"src/once.h",
"src/optimizing-compiler-thread.h",
"src/optimizing-compiler-thread.cc",
"src/parser.cc",
@ -959,8 +954,22 @@ source_set("v8_libbase") {
visibility = ":*" # Only targets in this file can depend on this.
sources = [
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
"src/base/atomicops_internals_x86_msvc.h",
"src/base/build_config.h",
"src/base/lazy-instance.h",
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
"src/base/win32-headers.h",
]
configs -= [ "//build/config/compiler:chromium_code" ]

View File

@ -36,6 +36,7 @@
#include <cmath>
#include "src/api.h"
#include "src/base/lazy-instance.h"
#include "src/builtins.h"
#include "src/counters.h"
#include "src/cpu.h"
@ -46,7 +47,6 @@
#include "src/ic.h"
#include "src/isolate-inl.h"
#include "src/jsregexp.h"
#include "src/lazy-instance.h"
#include "src/platform.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"

View File

@ -22,8 +22,8 @@
// to use these.
//
#ifndef V8_ATOMICOPS_H_
#define V8_ATOMICOPS_H_
#ifndef V8_BASE_ATOMICOPS_H_
#define V8_BASE_ATOMICOPS_H_
#include "include/v8.h"
#include "src/base/build_config.h"
@ -38,7 +38,7 @@
#endif
namespace v8 {
namespace internal {
namespace base {
typedef char Atomic8;
typedef int32_t Atomic32;
@ -131,23 +131,23 @@ Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
} } // namespace v8::internal
} } // namespace v8::base
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
#include "src/atomicops_internals_tsan.h"
#include "src/base/atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/atomicops_internals_x86_msvc.h"
#include "src/base/atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "src/atomicops_internals_mac.h"
#include "src/base/atomicops_internals_mac.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "src/atomicops_internals_arm64_gcc.h"
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "src/atomicops_internals_arm_gcc.h"
#include "src/base/atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/atomicops_internals_x86_gcc.h"
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "src/atomicops_internals_mips_gcc.h"
#include "src/base/atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
@ -155,7 +155,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(__APPLE__) || defined(__OpenBSD__)
#include "src/atomicops_internals_atomicword_compat.h"
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif
#endif // V8_ATOMICOPS_H_
#endif // V8_BASE_ATOMICOPS_H_
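
Besides the load/store families, atomicops.h declares the compare-and-swap operations whose per-platform implementations come from the _internals_ headers selected by the #if chain above. A hedged sketch of the usual claim-once idiom built on them (assumes src/base/atomicops.h; TryClaim and the flag are made-up names):

// CompareAndSwap returns the previously stored value, so observing 0 means
// this thread performed the 0 -> 1 transition and owns the claim.
bool TryClaim(volatile v8::base::Atomic32* flag) {
  return v8::base::Acquire_CompareAndSwap(flag, 0, 1) == 0;
}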

View File

@ -4,11 +4,11 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
namespace v8 {
namespace internal {
namespace base {
inline void MemoryBarrier() {
__asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
@ -311,6 +311,6 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return *ptr;
}
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_

View File

@ -6,15 +6,15 @@
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
#if defined(__QNXNTO__)
#include <sys/cpuinline.h>
#endif
namespace v8 {
namespace internal {
namespace base {
// Memory barriers on ARM are funky, but the kernel is here to help:
//
@ -296,6 +296,6 @@ inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_

View File

@ -4,8 +4,8 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
#define V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
#define V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
// which in turn means int. On some LP32 platforms, intptr_t is an int, but
@ -21,7 +21,7 @@
#if !defined(V8_HOST_ARCH_64_BIT)
namespace v8 {
namespace internal {
namespace base {
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
@ -51,14 +51,14 @@ inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Acquire_CompareAndSwap(
return v8::base::Acquire_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return v8::internal::Release_CompareAndSwap(
return v8::base::Release_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
@ -68,12 +68,12 @@ inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Acquire_Store(
return v8::base::Acquire_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::internal::Release_Store(
return v8::base::Release_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
@ -83,17 +83,17 @@ inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
return v8::internal::Acquire_Load(
return v8::base::Acquire_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return v8::internal::Release_Load(
return v8::base::Release_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
} } // namespace v8::internal
} } // namespace v8::base
#endif // !defined(V8_HOST_ARCH_64_BIT)
#endif // V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
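
These shims exist so that pointer-sized AtomicWord code keeps working on targets where AtomicWord is a distinct type from Atomic32 to the compiler. The pattern they have to support is release-publishing a pointer through an AtomicWord, as the MemoryChunk and FreeList changes later in this diff do; a reduced sketch with placeholder names:

#include "src/base/atomicops.h"

struct Node { int payload; };

static v8::base::AtomicWord head = 0;  // holds a Node*

void PublishHead(Node* n) {
  // Writer: n->payload is fully initialized before the release-store.
  v8::base::Release_Store(&head, reinterpret_cast<v8::base::AtomicWord>(n));
}

Node* AcquireHead() {
  // Reader: the acquire-load makes the writer's initialization visible.
  return reinterpret_cast<Node*>(v8::base::Acquire_Load(&head));
}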

View File

@ -4,13 +4,13 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_
#define V8_ATOMICOPS_INTERNALS_MAC_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#include <libkern/OSAtomic.h>
namespace v8 {
namespace internal {
namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
@ -199,6 +199,6 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_ATOMICOPS_INTERNALS_MAC_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_

View File

@ -4,11 +4,11 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
namespace v8 {
namespace internal {
namespace base {
// Atomically execute:
// result = *ptr;
@ -154,6 +154,6 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_

View File

@ -6,11 +6,11 @@
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
namespace v8 {
namespace internal {
namespace base {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
@ -371,9 +371,9 @@ inline void MemoryBarrier() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
} // namespace internal
} // namespace base
} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_

View File

@ -7,13 +7,13 @@
#include <string.h>
#include "src/atomicops.h"
#include "src/base/atomicops.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
@ -35,7 +35,7 @@
#if defined(cpuid) // initialize the struct only on x86
namespace v8 {
namespace internal {
namespace base {
// Set the flags so that code will run correctly and conservatively, so even
// if we haven't been initialized yet, we're probably single threaded, and our
@ -45,13 +45,13 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
};
} } // namespace v8::internal
} } // namespace v8::base
namespace {
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
void AtomicOps_Internalx86CPUFeaturesInit() {
using v8::internal::AtomicOps_Internalx86CPUFeatures;
using v8::base::AtomicOps_Internalx86CPUFeatures;
uint32_t eax = 0;
uint32_t ebx = 0;
@ -108,4 +108,4 @@ AtomicOpsx86Initializer g_initer;
#endif // if x86
#endif // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

View File

@ -4,11 +4,11 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
namespace v8 {
namespace internal {
namespace base {
// This struct is not part of the public API of this module; clients may not
// use it.
@ -265,8 +265,8 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(__x86_64__)
} } // namespace v8::internal
} } // namespace v8::base
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

View File

@ -4,11 +4,11 @@
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#include "src/base/macros.h"
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#if defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
@ -20,7 +20,7 @@
#endif
namespace v8 {
namespace internal {
namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
@ -197,6 +197,6 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(_WIN64)
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

View File

@ -65,14 +65,14 @@
// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
// dynamic lazy instances.
#ifndef V8_LAZY_INSTANCE_H_
#define V8_LAZY_INSTANCE_H_
#ifndef V8_BASE_LAZY_INSTANCE_H_
#define V8_BASE_LAZY_INSTANCE_H_
#include "src/base/macros.h"
#include "src/once.h"
#include "src/base/once.h"
namespace v8 {
namespace internal {
namespace base {
#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
@ -232,6 +232,6 @@ struct LazyDynamicInstance {
CreateTrait, InitOnceTrait, DestroyTrait> type;
};
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_LAZY_INSTANCE_H_
#endif // V8_BASE_LAZY_INSTANCE_H_
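
Call sites updated elsewhere in this change (elements-kind.cc, disasm-x64.cc, the platform/ headers) all follow the same shape: a base::LazyInstance<...>::type object at namespace scope, initialized with one of the LAZY_*_INITIALIZER macros and accessed through Pointer() or Get(). A minimal sketch with a placeholder payload type:

#include "src/base/lazy-instance.h"

struct Tables { int values[256]; };  // illustrative payload, not from the diff

static v8::base::LazyInstance<Tables>::type tables = LAZY_INSTANCE_INITIALIZER;

const Tables* GetTables() {
  // The first caller constructs Tables in place exactly once (subject to the
  // chosen InitOnceTrait); every later call returns the same instance.
  return tables.Pointer();
}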

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/once.h"
#include "src/base/once.h"
#ifdef _WIN32
#include <windows.h>
@ -10,10 +10,10 @@
#include <sched.h>
#endif
#include "src/atomicops.h"
#include "src/base/atomicops.h"
namespace v8 {
namespace internal {
namespace base {
void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
AtomicWord state = Acquire_Load(once);
@ -50,4 +50,4 @@ void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
}
}
} } // namespace v8::internal
} } // namespace v8::base

View File

@ -49,19 +49,19 @@
// whatsoever to statically-initialize its synchronization primitives, so our
// only choice is to assume that dynamic initialization is single-threaded.
#ifndef V8_ONCE_H_
#define V8_ONCE_H_
#ifndef V8_BASE_ONCE_H_
#define V8_BASE_ONCE_H_
#include "src/atomicops.h"
#include "src/base/atomicops.h"
namespace v8 {
namespace internal {
namespace base {
typedef AtomicWord OnceType;
#define V8_ONCE_INIT 0
#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
#define V8_DECLARE_ONCE(NAME) ::v8::base::OnceType NAME
enum {
ONCE_STATE_UNINITIALIZED = 0,
@ -95,6 +95,6 @@ inline void CallOnce(OnceType* once,
}
}
} } // namespace v8::internal
} } // namespace v8::base
#endif // V8_ONCE_H_
#endif // V8_BASE_ONCE_H_
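
The callers touched by this change (heap.cc, v8.cc, builtins.cc) all use the same shape: V8_DECLARE_ONCE at namespace scope plus base::CallOnce with a plain initialization function. A sketch with placeholder names:

#include "src/base/once.h"

V8_DECLARE_ONCE(init_once);  // expands to ::v8::base::OnceType init_once

static void InitializeSubsystem() {
  // One-time setup; runs at most once per process.
}

void EnsureSubsystem() {
  // Racing callers are safe: one of them runs InitializeSubsystem, the rest
  // wait inside CallOnceImpl until the once state is marked done.
  v8::base::CallOnce(&init_once, &InitializeSubsystem);
}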

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WIN32_HEADERS_H_
#define V8_WIN32_HEADERS_H_
#ifndef V8_BASE_WIN32_HEADERS_H_
#define V8_BASE_WIN32_HEADERS_H_
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
@ -76,4 +76,4 @@
#undef CreateSemaphore
#undef Yield
#endif // V8_WIN32_HEADERS_H_
#endif // V8_BASE_WIN32_HEADERS_H_

View File

@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
#include "src/builtins.h"
#include "src/cpu-profiler.h"
@ -1534,11 +1535,11 @@ struct BuiltinDesc {
class BuiltinFunctionTable {
public:
BuiltinDesc* functions() {
CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
base::CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
return functions_;
}
OnceType once_;
base::OnceType once_;
BuiltinDesc functions_[Builtins::builtin_count + 1];
friend class Builtins;

View File

@ -24,8 +24,8 @@ SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::Peek() {
MemoryBarrier();
if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
base::MemoryBarrier();
if (base::Acquire_Load(&dequeue_pos_->marker) == kFull) {
return &dequeue_pos_->record;
}
return NULL;
@ -34,15 +34,15 @@ T* SamplingCircularQueue<T, L>::Peek() {
template<typename T, unsigned L>
void SamplingCircularQueue<T, L>::Remove() {
Release_Store(&dequeue_pos_->marker, kEmpty);
base::Release_Store(&dequeue_pos_->marker, kEmpty);
dequeue_pos_ = Next(dequeue_pos_);
}
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::StartEnqueue() {
MemoryBarrier();
if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
base::MemoryBarrier();
if (base::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
return &enqueue_pos_->record;
}
return NULL;
@ -51,7 +51,7 @@ T* SamplingCircularQueue<T, L>::StartEnqueue() {
template<typename T, unsigned L>
void SamplingCircularQueue<T, L>::FinishEnqueue() {
Release_Store(&enqueue_pos_->marker, kFull);
base::Release_Store(&enqueue_pos_->marker, kFull);
enqueue_pos_ = Next(enqueue_pos_);
}
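
The queue code above relies on one invariant: a slot's marker is flipped with a release-store only after its record has been fully written (or fully consumed), and is checked with an acquire-load before the record is touched. Reduced to a single slot, the handshake looks roughly like this (placeholder names, not the real SamplingCircularQueue API):

#include "src/base/atomicops.h"

struct Slot {
  v8::base::Atomic32 marker;  // 0 = empty, 1 = full
  int record;
};

bool TryProduce(Slot* slot, int value) {
  if (v8::base::Acquire_Load(&slot->marker) != 0) return false;  // still full
  slot->record = value;                        // write the payload first...
  v8::base::Release_Store(&slot->marker, 1);   // ...then publish it
  return true;
}

bool TryConsume(Slot* slot, int* out) {
  if (v8::base::Acquire_Load(&slot->marker) != 1) return false;  // empty
  *out = slot->record;                         // safe: publish happened-before
  v8::base::Release_Store(&slot->marker, 0);   // hand the slot back
  return true;
}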

View File

@ -5,7 +5,7 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "src/globals.h"
namespace v8 {
@ -50,7 +50,7 @@ class SamplingCircularQueue {
struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
Entry() : marker(kEmpty) {}
T record;
Atomic32 marker;
base::Atomic32 marker;
};
Entry* Next(Entry* entry);

View File

@ -6,7 +6,7 @@
#define V8_CPU_PROFILER_H_
#include "src/allocation.h"
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "src/circular-queue.h"
#include "src/platform/time.h"
#include "src/sampler.h"

View File

@ -23,7 +23,7 @@
#include "src/checks.h"
#if V8_OS_WIN
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#endif
namespace v8 {

View File

@ -5,6 +5,7 @@
#include "src/elements-kind.h"
#include "src/api.h"
#include "src/base/lazy-instance.h"
#include "src/elements.h"
#include "src/objects.h"
@ -102,8 +103,8 @@ struct InitializeFastElementsKindSequence {
};
static LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
static base::LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;

View File

@ -5,7 +5,6 @@
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
#include "src/atomicops.h"
#include "src/globals.h"
namespace v8 {

View File

@ -8,7 +8,6 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen.h"
#include "src/lazy-instance.h"
#include "src/mark-compact.h"
#include "src/safepoint-table.h"
#include "src/scopeinfo.h"

View File

@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
@ -21,7 +22,6 @@
#include "src/natives.h"
#include "src/objects-visiting.h"
#include "src/objects-visiting-inl.h"
#include "src/once.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
#include "src/snapshot.h"
@ -5149,7 +5149,7 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
CallOnce(&initialize_gc_once, &InitializeGCOnce);
base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
MarkMapPointersAsEncoded(false);

View File

@ -37,10 +37,10 @@
namespace v8 {
namespace internal {
Atomic32 ThreadId::highest_thread_id_ = 0;
base::Atomic32 ThreadId::highest_thread_id_ = 0;
int ThreadId::AllocateThreadId() {
int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
int new_id = base::NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
return new_id;
}
@ -114,7 +114,7 @@ enum DefaultIsolateStatus {
static DefaultIsolateStatus default_isolate_status_
= kDefaultIsolateUninitialized;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
base::Atomic32 Isolate::isolate_counter_ = 0;
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
@ -1487,7 +1487,7 @@ Isolate::Isolate()
num_sweeper_threads_(0),
stress_deopt_count_(0),
next_optimization_id_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,

View File

@ -8,7 +8,7 @@
#include "include/v8-debug.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "src/builtins.h"
#include "src/contexts.h"
#include "src/execution.h"
@ -191,7 +191,7 @@ class ThreadId {
int id_;
static Atomic32 highest_thread_id_;
static base::Atomic32 highest_thread_id_;
friend class Isolate;
};
@ -1148,7 +1148,7 @@ class Isolate {
static ThreadDataTable* thread_data_table_;
// A global counter for all generated Isolates, might overflow.
static Atomic32 isolate_counter_;
static base::Atomic32 isolate_counter_;
void Deinit();
@ -1185,7 +1185,7 @@ class Isolate {
// the Error object.
bool IsErrorObject(Handle<Object> obj);
Atomic32 id_;
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
@ -1196,7 +1196,7 @@ class Isolate {
Counters* counters_;
CodeRange* code_range_;
RecursiveMutex break_access_;
Atomic32 debugger_initialized_;
base::Atomic32 debugger_initialized_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;

View File

@ -4,6 +4,7 @@
#include "src/v8.h"
#include "src/base/atomicops.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
@ -2972,20 +2973,20 @@ static void UpdatePointer(HeapObject** address, HeapObject* object) {
// compare and swap may fail in the case where the pointer update tries to
// update garbage memory which was concurrently accessed by the sweeper.
if (new_addr != NULL) {
NoBarrier_CompareAndSwap(
reinterpret_cast<AtomicWord*>(address),
reinterpret_cast<AtomicWord>(object),
reinterpret_cast<AtomicWord>(HeapObject::FromAddress(new_addr)));
base::NoBarrier_CompareAndSwap(
reinterpret_cast<base::AtomicWord*>(address),
reinterpret_cast<base::AtomicWord>(object),
reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
} else {
// We have to zap this pointer, because the store buffer may overflow later,
// and then we have to scan the entire heap and we don't want to find
// spurious newspace pointers in the old space.
// TODO(mstarzinger): This was changed to a sentinel value to track down
// rare crashes, change it back to Smi::FromInt(0) later.
NoBarrier_CompareAndSwap(
reinterpret_cast<AtomicWord*>(address),
reinterpret_cast<AtomicWord>(object),
reinterpret_cast<AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
base::NoBarrier_CompareAndSwap(
reinterpret_cast<base::AtomicWord*>(address),
reinterpret_cast<base::AtomicWord>(object),
reinterpret_cast<base::AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
}
}

View File

@ -12,6 +12,7 @@
#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
#include "src/base/atomicops.h"
#include "src/elements.h"
#include "src/objects.h"
#include "src/contexts.h"
@ -1122,24 +1123,26 @@ bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
#define READ_FIELD(p, offset) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
#define ACQUIRE_READ_FIELD(p, offset) \
reinterpret_cast<Object*>( \
Acquire_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
#define ACQUIRE_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Acquire_Load( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
#define NOBARRIER_READ_FIELD(p, offset) \
reinterpret_cast<Object*>( \
NoBarrier_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
#define NOBARRIER_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::NoBarrier_Load( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
#define RELEASE_WRITE_FIELD(p, offset, value) \
Release_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<AtomicWord>(value));
#define RELEASE_WRITE_FIELD(p, offset, value) \
base::Release_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define NOBARRIER_WRITE_FIELD(p, offset, value) \
NoBarrier_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<AtomicWord>(value));
#define NOBARRIER_WRITE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordWrite( \
@ -1235,16 +1238,17 @@ bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(NoBarrier_Load( \
reinterpret_cast<Atomic8*>(FIELD_ADDR(p, offset))) )
#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::NoBarrier_Load( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
#define WRITE_BYTE_FIELD(p, offset, value) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
NoBarrier_Store(reinterpret_cast<Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<Atomic8>(value));
#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return &READ_FIELD(obj, byte_offset);

View File

@ -138,7 +138,7 @@ class VisitorDispatchTable {
// every element of callbacks_ array will remain correct
// pointer (memcpy might be implemented as a byte copying loop).
for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
}
}
@ -152,7 +152,7 @@ class VisitorDispatchTable {
void Register(StaticVisitorBase::VisitorId id, Callback callback) {
ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
}
template<typename Visitor,
@ -184,7 +184,7 @@ class VisitorDispatchTable {
}
private:
AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
};

View File

@ -6,6 +6,7 @@
#include "src/v8.h"
#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
@ -51,7 +52,7 @@ void OptimizingCompilerThread::Run() {
OS::Sleep(FLAG_concurrent_recompilation_delay);
}
switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
case CONTINUE:
break;
case STOP:
@ -65,7 +66,8 @@ void OptimizingCompilerThread::Run() {
{ AllowHandleDereference allow_handle_dereference;
FlushInputQueue(true);
}
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
base::Release_Store(&stop_thread_,
static_cast<base::AtomicWord>(CONTINUE));
stop_semaphore_.Signal();
// Return to start of consumer loop.
continue;
@ -169,7 +171,7 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
@ -183,7 +185,7 @@ void OptimizingCompilerThread::Flush() {
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();

View File

@ -5,7 +5,7 @@
#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "src/flags.h"
#include "src/list.h"
#include "src/platform.h"
@ -38,7 +38,8 @@ class OptimizingCompilerThread : public Thread {
osr_hits_(0),
osr_attempts_(0),
blocked_jobs_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
base::NoBarrier_Store(&stop_thread_,
static_cast<base::AtomicWord>(CONTINUE));
input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
if (FLAG_concurrent_osr) {
// Allocate and mark OSR buffer slots as empty.
@ -126,7 +127,7 @@ class OptimizingCompilerThread : public Thread {
int osr_buffer_capacity_;
int osr_buffer_cursor_;
volatile AtomicWord stop_thread_;
volatile base::AtomicWord stop_thread_;
TimeDelta time_spent_compiling_;
TimeDelta time_spent_total_;

View File

@ -18,9 +18,9 @@
#include "src/v8.h"
#include "src/base/win32-headers.h"
#include "src/platform.h"
#include "src/v8threads.h"
#include "src/win32-headers.h"
namespace v8 {
namespace internal {

View File

@ -45,6 +45,10 @@
#include "src/isolate-inl.h"
#include "src/platform.h"
#ifdef V8_FAST_TLS_SUPPORTED
#include "src/base/atomicops.h"
#endif
namespace v8 {
namespace internal {
@ -599,7 +603,7 @@ static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#ifdef V8_FAST_TLS_SUPPORTED
static Atomic32 tls_base_offset_initialized = 0;
static base::Atomic32 tls_base_offset_initialized = 0;
intptr_t kMacTlsBaseOffset = 0;
// It's safe to do the initialization more that once, but it has to be
@ -635,7 +639,7 @@ static void InitializeTlsBaseOffset() {
kMacTlsBaseOffset = 0;
}
Release_Store(&tls_base_offset_initialized, 1);
base::Release_Store(&tls_base_offset_initialized, 1);
}

View File

@ -15,7 +15,7 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#include "src/v8.h"

View File

@ -43,7 +43,7 @@ int signbit(double x);
// Microsoft Visual C++ specific stuff.
#if V8_LIBC_MSVCRT
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#include "src/win32-math.h"
int strncasecmp(const char* s1, const char* s2, int n);

View File

@ -5,6 +5,7 @@
#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
#define V8_PLATFORM_CONDITION_VARIABLE_H_
#include "src/base/lazy-instance.h"
#include "src/platform/mutex.h"
namespace v8 {
@ -106,9 +107,9 @@ class ConditionVariable V8_FINAL {
// LockGuard<Mutex> lock_guard(&my_mutex);
// my_condvar.Pointer()->Wait(&my_mutex);
// }
typedef LazyStaticInstance<ConditionVariable,
DefaultConstructTrait<ConditionVariable>,
ThreadSafeInitOnceTrait>::type LazyConditionVariable;
typedef base::LazyStaticInstance<
ConditionVariable, base::DefaultConstructTrait<ConditionVariable>,
base::ThreadSafeInitOnceTrait>::type LazyConditionVariable;
#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER

View File

@ -5,11 +5,11 @@
#ifndef V8_PLATFORM_MUTEX_H_
#define V8_PLATFORM_MUTEX_H_
#include "src/checks.h"
#include "src/lazy-instance.h"
#include "src/base/lazy-instance.h"
#if V8_OS_WIN
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#endif
#include "src/checks.h"
#if V8_OS_POSIX
#include <pthread.h> // NOLINT
@ -101,9 +101,9 @@ class Mutex V8_FINAL {
// // Do something.
// }
//
typedef LazyStaticInstance<Mutex,
DefaultConstructTrait<Mutex>,
ThreadSafeInitOnceTrait>::type LazyMutex;
typedef v8::base::LazyStaticInstance<
Mutex, v8::base::DefaultConstructTrait<Mutex>,
v8::base::ThreadSafeInitOnceTrait>::type LazyMutex;
#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
@ -182,9 +182,9 @@ class RecursiveMutex V8_FINAL {
// // Do something.
// }
//
typedef LazyStaticInstance<RecursiveMutex,
DefaultConstructTrait<RecursiveMutex>,
ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
typedef v8::base::LazyStaticInstance<
RecursiveMutex, v8::base::DefaultConstructTrait<RecursiveMutex>,
v8::base::ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER

View File

@ -5,9 +5,9 @@
#ifndef V8_PLATFORM_SEMAPHORE_H_
#define V8_PLATFORM_SEMAPHORE_H_
#include "src/lazy-instance.h"
#include "src/base/lazy-instance.h"
#if V8_OS_WIN
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#endif
#if V8_OS_MACOSX
@ -90,10 +90,10 @@ struct CreateSemaphoreTrait {
template <int N>
struct LazySemaphore {
typedef typename LazyDynamicInstance<
typedef typename v8::base::LazyDynamicInstance<
Semaphore,
CreateSemaphoreTrait<N>,
ThreadSafeInitOnceTrait>::type type;
v8::base::ThreadSafeInitOnceTrait>::type type;
};
#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER

View File

@ -13,12 +13,13 @@
#include <string.h>
#if V8_OS_WIN
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/checks.h"
#include "src/cpu.h"
#include "src/platform.h"
#if V8_OS_WIN
#include "src/win32-headers.h"
#endif
namespace v8 {
namespace internal {
@ -193,9 +194,9 @@ class Clock V8_FINAL {
};
static LazyStaticInstance<Clock,
DefaultConstructTrait<Clock>,
ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER;
static base::LazyStaticInstance<Clock, base::DefaultConstructTrait<Clock>,
base::ThreadSafeInitOnceTrait>::type clock =
LAZY_STATIC_INSTANCE_INITIALIZER;
Time Time::Now() {
@ -462,10 +463,11 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
};
static LazyStaticInstance<RolloverProtectedTickClock,
DefaultConstructTrait<RolloverProtectedTickClock>,
ThreadSafeInitOnceTrait>::type tick_clock =
LAZY_STATIC_INSTANCE_INITIALIZER;
static base::LazyStaticInstance<
RolloverProtectedTickClock,
base::DefaultConstructTrait<RolloverProtectedTickClock>,
base::ThreadSafeInitOnceTrait>::type tick_clock =
LAZY_STATIC_INSTANCE_INITIALIZER;
struct CreateHighResTickClockTrait {
@ -489,9 +491,9 @@ struct CreateHighResTickClockTrait {
};
static LazyDynamicInstance<TickClock,
static base::LazyDynamicInstance<TickClock,
CreateHighResTickClockTrait,
ThreadSafeInitOnceTrait>::type high_res_tick_clock =
base::ThreadSafeInitOnceTrait>::type high_res_tick_clock =
LAZY_DYNAMIC_INSTANCE_INITIALIZER;

View File

@ -6,7 +6,6 @@
#define V8_RUNTIME_PROFILER_H_
#include "src/allocation.h"
#include "src/atomicops.h"
namespace v8 {
namespace internal {

View File

@ -38,7 +38,7 @@
#elif V8_OS_WIN || V8_OS_CYGWIN
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#endif
@ -654,7 +654,7 @@ void Sampler::Stop() {
void Sampler::IncreaseProfilingDepth() {
NoBarrier_AtomicIncrement(&profiling_, 1);
base::NoBarrier_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
SignalHandler::IncreaseSamplerCount();
#endif
@ -665,7 +665,7 @@ void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
SignalHandler::DecreaseSamplerCount();
#endif
NoBarrier_AtomicIncrement(&profiling_, -1);
base::NoBarrier_AtomicIncrement(&profiling_, -1);
}

View File

@ -5,7 +5,7 @@
#ifndef V8_SAMPLER_H_
#define V8_SAMPLER_H_
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "src/frames.h"
#include "src/globals.h"
@ -74,20 +74,20 @@ class Sampler {
// Whether the sampling thread should use this Sampler for CPU profiling?
bool IsProfiling() const {
return NoBarrier_Load(&profiling_) > 0 &&
!NoBarrier_Load(&has_processing_thread_);
return base::NoBarrier_Load(&profiling_) > 0 &&
!base::NoBarrier_Load(&has_processing_thread_);
}
void IncreaseProfilingDepth();
void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
bool IsActive() const { return base::NoBarrier_Load(&active_); }
void DoSample();
// If true next sample must be initiated on the profiler event processor
// thread right after latest sample is processed.
void SetHasProcessingThread(bool value) {
NoBarrier_Store(&has_processing_thread_, value);
base::NoBarrier_Store(&has_processing_thread_, value);
}
// Used in tests to make sure that stack sampling is performed.
@ -108,13 +108,13 @@ class Sampler {
virtual void Tick(TickSample* sample) = 0;
private:
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
Atomic32 has_processing_thread_;
Atomic32 active_;
base::Atomic32 profiling_;
base::Atomic32 has_processing_thread_;
base::Atomic32 active_;
PlatformData* data_; // Platform specific data.
bool is_counting_samples_;
// Counts stack samples taken in JS VM state.

View File

@ -2046,11 +2046,13 @@ void FreeListNode::set_next(FreeListNode* next) {
// stage.
if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
NoBarrier_Store(reinterpret_cast<AtomicWord*>(address() + kNextOffset),
reinterpret_cast<AtomicWord>(next));
base::NoBarrier_Store(
reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
reinterpret_cast<base::AtomicWord>(next));
} else {
NoBarrier_Store(reinterpret_cast<AtomicWord*>(address() + kPointerSize),
reinterpret_cast<AtomicWord>(next));
base::NoBarrier_Store(
reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
reinterpret_cast<base::AtomicWord>(next));
}
}
@ -2071,7 +2073,7 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
category->end()->set_next(top());
}
set_top(category->top());
NoBarrier_Store(&top_, category->top_);
base::NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
}

View File

@ -6,6 +6,7 @@
#define V8_SPACES_H_
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/log.h"
@ -291,19 +292,19 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
MemoryChunk* next_chunk() const {
return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
}
MemoryChunk* prev_chunk() const {
return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
}
void set_next_chunk(MemoryChunk* next) {
Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
}
void set_prev_chunk(MemoryChunk* prev) {
Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
}
Space* owner() const {
@ -461,18 +462,17 @@ class MemoryChunk {
ParallelSweepingState parallel_sweeping() {
return static_cast<ParallelSweepingState>(
Acquire_Load(&parallel_sweeping_));
base::Acquire_Load(&parallel_sweeping_));
}
void set_parallel_sweeping(ParallelSweepingState state) {
Release_Store(&parallel_sweeping_, state);
base::Release_Store(&parallel_sweeping_, state);
}
bool TryParallelSweeping() {
return Acquire_CompareAndSwap(&parallel_sweeping_,
PARALLEL_SWEEPING_PENDING,
PARALLEL_SWEEPING_IN_PROGRESS) ==
PARALLEL_SWEEPING_PENDING;
return base::Acquire_CompareAndSwap(
&parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
}
// Manage live byte count (count of bytes known to be live,
@ -707,7 +707,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
AtomicWord parallel_sweeping_;
base::AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
@ -726,9 +726,9 @@ class MemoryChunk {
private:
// next_chunk_ holds a pointer of type MemoryChunk
AtomicWord next_chunk_;
base::AtomicWord next_chunk_;
// prev_chunk_ holds a pointer of type MemoryChunk
AtomicWord prev_chunk_;
base::AtomicWord prev_chunk_;
friend class MemoryAllocator;
};
@ -1532,11 +1532,11 @@ class FreeListCategory {
void RepairFreeList(Heap* heap);
FreeListNode* top() const {
return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
}
void set_top(FreeListNode* top) {
NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
}
FreeListNode** GetEndAddress() { return &end_; }
@ -1560,7 +1560,7 @@ class FreeListCategory {
private:
// top_ points to the top FreeListNode* in the free list category.
AtomicWord top_;
base::AtomicWord top_;
FreeListNode* end_;
Mutex mutex_;

View File

@ -7,6 +7,8 @@
#include <algorithm>
#include "src/v8.h"
#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/store-buffer-inl.h"
@ -345,7 +347,7 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
// checks that pointers which satisfy predicate point into
// the active semispace.
Object* object = reinterpret_cast<Object*>(
NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
heap_->InNewSpace(object);
slot_address += kPointerSize;
}
@ -382,7 +384,7 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
slot_address += kPointerSize) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = reinterpret_cast<Object*>(
NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
if (heap_->InNewSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
ASSERT(heap_object->IsHeapObject());
@ -391,7 +393,7 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
if (clear_maps) ClearDeadObject(heap_object);
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
object = reinterpret_cast<Object*>(
NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
if (heap_->InNewSpace(object)) {
EnterDirectlyIntoStoreBuffer(slot_address);
}
@ -470,7 +472,7 @@ void StoreBuffer::IteratePointersInStoreBuffer(
#endif
Object** slot = reinterpret_cast<Object**>(*current);
Object* object = reinterpret_cast<Object*>(
NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
if (heap_->InFromSpace(object)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
// The new space object was not promoted if it still contains a map
@ -478,7 +480,7 @@ void StoreBuffer::IteratePointersInStoreBuffer(
if (clear_maps) ClearDeadObject(heap_object);
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
object = reinterpret_cast<Object*>(
NoBarrier_Load(reinterpret_cast<AtomicWord*>(slot)));
base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
if (heap_->InNewSpace(object)) {
EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
}

View File

@ -23,7 +23,7 @@ SweeperThread::SweeperThread(Isolate* isolate)
end_sweeping_semaphore_(0),
stop_semaphore_(0) {
ASSERT(!FLAG_job_based_sweeping);
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
}
@ -36,7 +36,7 @@ void SweeperThread::Run() {
while (true) {
start_sweeping_semaphore_.Wait();
if (Acquire_Load(&stop_thread_)) {
if (base::Acquire_Load(&stop_thread_)) {
stop_semaphore_.Signal();
return;
}
@ -49,7 +49,7 @@ void SweeperThread::Run() {
void SweeperThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(true));
start_sweeping_semaphore_.Signal();
stop_semaphore_.Wait();
Join();

View File

@ -5,7 +5,7 @@
#ifndef V8_SWEEPER_THREAD_H_
#define V8_SWEEPER_THREAD_H_
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "src/flags.h"
#include "src/platform.h"
#include "src/utils.h"
@ -37,7 +37,7 @@ class SweeperThread : public Thread {
Semaphore start_sweeping_semaphore_;
Semaphore end_sweeping_semaphore_;
Semaphore stop_semaphore_;
volatile AtomicWord stop_thread_;
volatile base::AtomicWord stop_thread_;
};
} } // namespace v8::internal

View File

@ -7,8 +7,6 @@
#include "src/unbound-queue.h"
#include "src/atomicops.h"
namespace v8 {
namespace internal {
@ -26,7 +24,7 @@ struct UnboundQueue<Record>::Node: public Malloced {
template<typename Record>
UnboundQueue<Record>::UnboundQueue() {
first_ = new Node(Record());
divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
divider_ = last_ = reinterpret_cast<base::AtomicWord>(first_);
}
@ -46,10 +44,10 @@ void UnboundQueue<Record>::DeleteFirst() {
template<typename Record>
bool UnboundQueue<Record>::Dequeue(Record* rec) {
if (divider_ == Acquire_Load(&last_)) return false;
if (divider_ == base::Acquire_Load(&last_)) return false;
Node* next = reinterpret_cast<Node*>(divider_)->next;
*rec = next->value;
Release_Store(&divider_, reinterpret_cast<AtomicWord>(next));
base::Release_Store(&divider_, reinterpret_cast<base::AtomicWord>(next));
return true;
}
@ -58,9 +56,9 @@ template<typename Record>
void UnboundQueue<Record>::Enqueue(const Record& rec) {
Node*& next = reinterpret_cast<Node*>(last_)->next;
next = new Node(rec);
Release_Store(&last_, reinterpret_cast<AtomicWord>(next));
base::Release_Store(&last_, reinterpret_cast<base::AtomicWord>(next));
while (first_ != reinterpret_cast<Node*>(Acquire_Load(&divider_))) {
while (first_ != reinterpret_cast<Node*>(base::Acquire_Load(&divider_))) {
DeleteFirst();
}
}
@ -68,13 +66,13 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
template<typename Record>
bool UnboundQueue<Record>::IsEmpty() const {
return NoBarrier_Load(&divider_) == NoBarrier_Load(&last_);
return base::NoBarrier_Load(&divider_) == base::NoBarrier_Load(&last_);
}
template<typename Record>
Record* UnboundQueue<Record>::Peek() const {
if (divider_ == Acquire_Load(&last_)) return NULL;
if (divider_ == base::Acquire_Load(&last_)) return NULL;
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}

View File

@ -6,6 +6,7 @@
#define V8_UNBOUND_QUEUE_
#include "src/allocation.h"
#include "src/base/atomicops.h"
namespace v8 {
namespace internal {
@ -34,8 +35,8 @@ class UnboundQueue BASE_EMBEDDED {
struct Node;
Node* first_;
AtomicWord divider_; // Node*
AtomicWord last_; // Node*
base::AtomicWord divider_; // Node*
base::AtomicWord last_; // Node*
DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
};

View File

@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/assembler.h"
#include "src/base/once.h"
#include "src/isolate.h"
#include "src/elements.h"
#include "src/bootstrapper.h"
@ -18,7 +19,6 @@
#endif
#include "src/lithium-allocator.h"
#include "src/objects.h"
#include "src/once.h"
#include "src/platform.h"
#include "src/sampler.h"
#include "src/runtime-profiler.h"
@ -111,7 +111,7 @@ void V8::InitializeOncePerProcessImpl() {
void V8::InitializeOncePerProcess() {
CallOnce(&init_once, &InitializeOncePerProcessImpl);
base::CallOnce(&init_once, &InitializeOncePerProcessImpl);
}

View File

@ -8,7 +8,7 @@
#include "include/v8.h"
#if V8_OS_WIN
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,

View File

@ -8,7 +8,7 @@
// (http://www.opengroup.org/onlinepubs/000095399/)
#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <cmath>

View File

@ -10,8 +10,8 @@
#if V8_TARGET_ARCH_X64
#include "src/base/lazy-instance.h"
#include "src/disasm.h"
#include "src/lazy-instance.h"
namespace disasm {
@ -248,7 +248,7 @@ void InstructionTable::AddJumpConditionalShort() {
}
static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
static v8::base::LazyInstance<InstructionTable>::type instruction_table =
LAZY_INSTANCE_INITIALIZER;

View File

@ -27,9 +27,10 @@
#include "src/v8.h"
#include "src/atomicops.h"
#include "src/base/atomicops.h"
#include "test/cctest/cctest.h"
using namespace v8::base;
using namespace v8::internal;

View File

@ -35,7 +35,7 @@ using i::SamplingCircularQueue;
TEST(SamplingCircularQueue) {
typedef i::AtomicWord Record;
typedef v8::base::AtomicWord Record;
const int kMaxRecordsInQueue = 4;
SamplingCircularQueue<Record, kMaxRecordsInQueue> scq;
@ -99,7 +99,7 @@ TEST(SamplingCircularQueue) {
namespace {
typedef i::AtomicWord Record;
typedef v8::base::AtomicWord Record;
typedef SamplingCircularQueue<Record, 12> TestSampleQueue;
class ProducerThread: public i::Thread {

View File

@ -31,8 +31,8 @@
#include "src/v8.h"
#include "src/base/win32-headers.h"
#include "src/platform.h"
#include "src/win32-headers.h"
#include "test/cctest/cctest.h"
using namespace ::v8::internal;

View File

@ -33,7 +33,7 @@
#include "test/cctest/cctest.h"
#if V8_OS_WIN
#include "src/win32-headers.h"
#include "src/base/win32-headers.h"
#endif
using namespace v8::internal;

View File

@ -265,8 +265,6 @@
'../../src/assert-scope.cc',
'../../src/ast.cc',
'../../src/ast.h',
'../../src/atomicops.h',
'../../src/atomicops_internals_x86_gcc.cc',
'../../src/bignum-dtoa.cc',
'../../src/bignum-dtoa.h',
'../../src/bignum.cc',
@ -448,7 +446,6 @@
'../../src/jsregexp-inl.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
'../../src/lazy-instance.h',
# TODO(jochen): move libplatform/ files to their own target.
'../../src/libplatform/default-platform.cc',
'../../src/libplatform/default-platform.h',
@ -486,8 +483,6 @@
'../../src/objects-visiting.h',
'../../src/objects.cc',
'../../src/objects.h',
'../../src/once.cc',
'../../src/once.h',
'../../src/optimizing-compiler-thread.h',
'../../src/optimizing-compiler-thread.cc',
'../../src/parser.cc',
@ -1057,8 +1052,7 @@
},
{
'target_name': 'v8_libbase.<(v8_target_arch)',
# TODO(jochen): Should be a static library once it has sources in it.
'type': 'none',
'type': 'static_library',
'variables': {
'optimize': 'max',
},
@ -1066,8 +1060,22 @@
'../..',
],
'sources': [
'../../src/base/atomicops.h',
'../../src/base/atomicops_internals_arm64_gcc.h',
'../../src/base/atomicops_internals_arm_gcc.h',
'../../src/base/atomicops_internals_atomicword_compat.h',
'../../src/base/atomicops_internals_mac.h',
'../../src/base/atomicops_internals_mips_gcc.h',
'../../src/base/atomicops_internals_tsan.h',
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',
'../../src/base/atomicops_internals_x86_msvc.h',
'../../src/base/build_config.h',
'../../src/base/lazy-instance.h',
'../../src/base/macros.h',
'../../src/base/once.cc',
'../../src/base/once.h',
'../../src/base/win32-headers.h',
],
'conditions': [
['want_separate_host_toolset==1', {