Reland "Update implementation of atomics with latest Chromium version, but use compiler builtin atomics"

BUG=

Review-Url: https://chromiumcodereview.appspot.com/2438273002
Cr-Commit-Position: refs/heads/master@{#40496}
hpayer 2016-10-21 03:05:32 -07:00 committed by Commit bot
parent bac992a114
commit 726becfb49
17 changed files with 196 additions and 2484 deletions
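The new portable implementation replaces the hand-written per-architecture assembly removed below with GCC/Clang __atomic builtins. As a rough illustration (a sketch, not part of the commit; the function names here are illustrative), the old NoBarrier/Release operations map onto the builtins roughly like this:

// Illustrative sketch only: a relaxed compare-and-swap and a release store
// expressed with the compiler builtins used by the new
// atomicops_internals_portable.h added in this commit.
#include <stdint.h>

typedef int32_t Atomic32;

inline Atomic32 RelaxedCompareAndSwap(volatile Atomic32* ptr,
                                      Atomic32 old_value, Atomic32 new_value) {
  // On failure, old_value is overwritten with the value observed in *ptr;
  // returning it matches the old API, which always returns the previous value.
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline void ReleaseStore(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}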


@ -2192,16 +2192,8 @@ v8_component("v8_libbase") {
"src/base/adapters.h",
"src/base/atomic-utils.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_s390_gcc.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_x86_msvc.h",
"src/base/base-export.h",
"src/base/bits.cc",


@ -26,9 +26,17 @@
#define V8_BASE_ATOMICOPS_H_
#include <stdint.h>
// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
#if defined(V8_OS_WIN) && defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// X64, undef it, and call its documented
@ -100,13 +108,11 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
@ -124,44 +130,25 @@ Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
} // namespace base
} // namespace v8
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
#include "src/base/atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#if defined(V8_OS_WIN)
// TODO(hpayer): The MSVC header includes windows.h, which other files end up
// relying on. Fix this as part of crbug.com/559247.
#include "src/base/atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "src/base/atomicops_internals_mac.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "src/base/atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
#include "src/base/atomicops_internals_ppc_gcc.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "src/base/atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "src/base/atomicops_internals_mips_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
#include "src/base/atomicops_internals_mips64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_S390
#include "src/base/atomicops_internals_s390_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#include "src/base/atomicops_internals_portable.h"
#endif
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(__APPLE__) || defined(__OpenBSD__) || defined(V8_OS_AIX)
#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif
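With Acquire_Store and Release_Load dropped from the API above, callers pair Release_Store with Acquire_Load instead. A minimal usage sketch (not from this commit; the flag and payload names are illustrative):

#include "src/base/atomicops.h"

namespace {

v8::base::Atomic32 ready_flag = 0;  // illustrative flag
int payload = 0;                    // illustrative data guarded by the flag

void Publish() {
  payload = 42;
  // The release store makes the payload write visible to any thread that
  // later observes ready_flag == 1 via an acquire load.
  v8::base::Release_Store(&ready_flag, 1);
}

int Consume() {
  // Pairs with the release store in Publish().
  while (v8::base::Acquire_Load(&ready_flag) == 0) {
    // Spin; a real caller would yield or block instead.
  }
  return payload;
}

}  // namespace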


@ -1,317 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
namespace v8 {
namespace base {
inline void MemoryBarrier() {
__asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
}
// NoBarrier versions of the operation include "memory" in the clobber list.
// This is not required for direct usage of the NoBarrier versions of the
// operations. However this is required for correctness when they are used as
// part of the Acquire or Release versions, to ensure that nothing from outside
// the call is reordered between the operation and the memory barrier. This does
// not change the code generated, so has no or minimal impact on the
// NoBarrier operations.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
: "cc", "memory"
); // NOLINT
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
"stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"IJr" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
MemoryBarrier();
result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return prev;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
MemoryBarrier();
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__asm__ __volatile__ ( // NOLINT
"stlr %w[value], %[ptr] \n\t"
: [ptr]"=Q" (*ptr)
: [value]"r" (value)
: "memory"
); // NOLINT
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value;
__asm__ __volatile__ ( // NOLINT
"ldar %w[value], %[ptr] \n\t"
: [value]"=r" (value)
: [ptr]"Q" (*ptr)
: "memory"
); // NOLINT
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
: "cc", "memory"
); // NOLINT
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], %[ptr] \n\t"
"add %[result], %[result], %[increment] \n\t"
"stxr %w[temp], %[result], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
: [increment]"IJr" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
MemoryBarrier();
result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return prev;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
MemoryBarrier();
prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__ ( // NOLINT
"stlr %x[value], %[ptr] \n\t"
: [ptr]"=Q" (*ptr)
: [value]"r" (value)
: "memory"
); // NOLINT
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value;
__asm__ __volatile__ ( // NOLINT
"ldar %x[value], %[ptr] \n\t"
: [value]"=r" (value)
: [ptr]"Q" (*ptr)
: "memory"
); // NOLINT
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base
} // namespace v8
#endif  // V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_


@ -1,304 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
#if defined(__QNXNTO__)
#include <sys/cpuinline.h>
#endif
namespace v8 {
namespace base {
// Memory barriers on ARM are funky, but the kernel is here to help:
//
// * ARMv5 didn't support SMP, there is no memory barrier instruction at
// all on this architecture, or when targeting its machine code.
//
// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
// writing a random value to a very specific coprocessor register.
//
// * On ARMv7, the "dmb" instruction is used to perform a full memory
// barrier (though writing to the co-processor will still work).
// However, on single core devices (e.g. Nexus One, or Nexus S),
// this instruction will take up to 200 ns, which is huge, even though
// it's completely un-needed on these devices.
//
// * There is no easy way to determine at runtime if the device is
// single or multi-core. However, the kernel provides a useful helper
// function at a fixed memory address (0xffff0fa0), which will always
// perform a memory barrier in the most efficient way. I.e. on single
// core devices, this is an empty function that exits immediately.
// On multi-core devices, it implements a full memory barrier.
//
// * This source could be compiled to ARMv5 machine code that runs on a
// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
// are needed for correct execution. Always call the kernel helper, even
// when targeting ARMv5TE.
//
inline void MemoryBarrier() {
#if defined(__ANDROID__)
// Note: This is a function call, which is also an implicit compiler barrier.
typedef void (*KernelMemoryBarrierFunc)();
((KernelMemoryBarrierFunc)0xffff0fa0)();
#elif defined(__QNXNTO__)
__cpu_membarrier();
#else
// Fallback to GCC built-in function
__sync_synchronize();
#endif
}
// An ARM toolchain would only define one of these depending on which
// variant of the target architecture is being used. This tests against
// any known ARMv6 or ARMv7 variant, where it is possible to directly
// use ldrex/strex instructions to implement fast atomic operations.
#if defined(__ARM_ARCH_8A__) || \
defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
int reloop;
do {
// The following is equivalent to:
//
// prev_value = LDREX(ptr)
// reloop = 0
// if (prev_value != old_value)
// reloop = STREX(ptr, new_value)
__asm__ __volatile__(" ldrex %0, [%3]\n"
" mov %1, #0\n"
" cmp %0, %4\n"
#ifdef __thumb2__
" it eq\n"
#endif
" strexeq %1, %5, [%3]\n"
: "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
: "r"(ptr), "r"(old_value), "r"(new_value)
: "cc", "memory");
} while (reloop != 0);
return prev_value;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return result;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 value;
int reloop;
do {
// Equivalent to:
//
// value = LDREX(ptr)
// value += increment
// reloop = STREX(ptr, value)
//
__asm__ __volatile__(" ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
: "=&r"(value), "=&r"(reloop), "+m"(*ptr)
: "r"(ptr), "r"(increment)
: "cc", "memory");
} while (reloop);
return value;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
// TODO(digit): Investigate if it's possible to implement this with
// a single MemoryBarrier() operation between the LDREX and STREX.
// See http://crbug.com/246514
MemoryBarrier();
Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
int reloop;
do {
// old_value = LDREX(ptr)
// reloop = STREX(ptr, new_value)
__asm__ __volatile__(" ldrex %0, [%3]\n"
" strex %1, %4, [%3]\n"
: "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
: "r"(ptr), "r"(new_value)
: "cc", "memory");
} while (reloop != 0);
return old_value;
}
// This tests against any known ARMv5 variant.
#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
// The kernel also provides a helper function to perform an atomic
// compare-and-swap operation at the hard-wired address 0xffff0fc0.
// On ARMv5, this is implemented by a special code path that the kernel
// detects and treats specially when thread pre-emption happens.
// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
//
// Note that this always performs a full memory barrier, so there is no
// need to add calls to MemoryBarrier() before or after it. It also
// returns 0 on success, and 1 on failure.
//
// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
// use newer kernel revisions, so this should not be a concern.
namespace {
inline int LinuxKernelCmpxchg(Atomic32 old_value,
Atomic32 new_value,
volatile Atomic32* ptr) {
typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
}
} // namespace
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
for (;;) {
prev_value = *ptr;
if (prev_value != old_value)
return prev_value;
if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
return old_value;
}
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (LinuxKernelCmpxchg(old_value, new_value, ptr));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
// The exchange took place as expected.
return new_value;
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
for (;;) {
prev_value = *ptr;
if (prev_value != old_value) {
// Always ensure acquire semantics.
MemoryBarrier();
return prev_value;
}
if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
return old_value;
}
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
// This could be implemented as:
// MemoryBarrier();
// return NoBarrier_CompareAndSwap();
//
// But would use 3 barriers per successful CAS. To save performance,
// use Acquire_CompareAndSwap(). Its implementation guarantees that:
// - A successful swap uses only 2 barriers (in the kernel helper).
// - An early return due to (prev_value != old_value) performs
// a memory barrier with no store, which is equivalent to the
// generic implementation above.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
#else
# error "Your CPU's ARM architecture is not supported yet"
#endif
// NOTE: Atomicity of the following load and store operations is only
// guaranteed in case of 32-bit alignment of |ptr| values.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// Byte accessors.
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_


@ -67,11 +67,6 @@ inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::base::Acquire_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::base::Release_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
@ -87,11 +82,6 @@ inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
return v8::base::Release_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
}
} // namespace base
} // namespace v8


@ -1,216 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#include <libkern/OSAtomic.h>
namespace v8 {
namespace base {
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
inline void MemoryBarrier() { OSMemoryBarrier(); }
inline void AcquireMemoryBarrier() {
// On x86 processors, loads already have acquire semantics, so
// there is no need to put a full barrier here.
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
ATOMICOPS_COMPILER_BARRIER();
#else
MemoryBarrier();
#endif
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap32(old_value, new_value,
const_cast<Atomic32*>(ptr)));
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
do {
if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
const_cast<Atomic32*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
AcquireMemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#ifdef __LP64__
// 64-bit implementation on 64-bit platform
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
reinterpret_cast<volatile int64_t*>(ptr)));
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment,
reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64Barrier(
old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
} while (prev_value == old_value);
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
// Acquire and Release memory barriers; they are equivalent.
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
AcquireMemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#endif // defined(__LP64__)
#undef ATOMICOPS_COMPILER_BARRIER
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_


@ -1,310 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
namespace v8 {
namespace base {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "r" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__(
".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %1, %0, %3\n" // temp2 = temp + increment
"sc %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"addu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
: "Ir"(increment), "m"(*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
__asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
// 64-bit versions of the atomic ops.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"scd %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "r" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"scd %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp, temp2;
__asm__ __volatile__(
".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %2\n" // temp = *ptr
"daddu %1, %0, %3\n" // temp2 = temp + increment
"scd %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"daddu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
: "Ir"(increment), "m"(*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
MemoryBarrier();
Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_


@ -1,161 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
namespace v8 {
namespace base {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, 0(%4)\n" // prev = *ptr
"bne %0, %2, 2f\n" // if (prev != old_value) goto 2
"move %1, %3\n" // tmp = new_value
"sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=&r" (tmp)
: "r" (old_value), "r" (new_value), "r" (ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
".set at\n"
"1:\n"
"ll %1, 0(%3)\n" // old = *ptr
"move %0, %2\n" // temp = new_value
"sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old)
: "r" (new_value), "r" (ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, 0(%3)\n" // temp = *ptr
"addu %1, %0, %2\n" // temp2 = temp + increment
"sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"addu %1, %0, %2\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2)
: "Ir" (increment), "r" (ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
__asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_


@ -0,0 +1,172 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// Of note in this implementation:
// * All NoBarrier variants are implemented as relaxed.
// * All Barrier variants are implemented as sequentially-consistent.
// * Compare exchange's failure ordering is always the same as the success one
// (except for release, which fails as relaxed): using a weaker ordering is
// only valid under certain uses of compare exchange.
// * Acquire store doesn't exist in the C11 memory model, it is instead
// implemented as a relaxed store followed by a sequentially consistent
// fence.
// * Release load doesn't exist in the C11 memory model, it is instead
// implemented as sequentially consistent fence followed by a relaxed load.
// * Atomic increment is expected to return the post-incremented value, whereas
// C11 fetch add returns the previous value. The implementation therefore
// needs to increment twice (which the compiler should be able to detect and
// optimize).
#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#include <atomic>
#include "src/base/build_config.h"
namespace v8 {
namespace base {
// This implementation is transitional and maintains the original API for
// atomicops.h.
inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
// Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
// not defined, leading to the linker complaining about undefined references.
__atomic_thread_fence(std::memory_order_seq_cst);
#else
std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELEASE, __ATOMIC_RELAXED);
return old_value;
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
#if defined(V8_HOST_ARCH_64_BIT)
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return old_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELEASE, __ATOMIC_RELAXED);
return old_value;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
#endif // defined(V8_HOST_ARCH_64_BIT)
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
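A small usage sketch (not part of the file above) of the increment semantics called out in its header comment: __atomic_fetch_add returns the previous value, so the wrappers add the increment back and return the post-incremented value, matching the old API.

#include "src/base/atomicops.h"

void IncrementExample() {
  v8::base::Atomic32 counter = 5;
  // NoBarrier_AtomicIncrement is relaxed; Barrier_AtomicIncrement is the
  // sequentially consistent variant. Both return the post-incremented value.
  v8::base::Atomic32 after = v8::base::NoBarrier_AtomicIncrement(&counter, 3);
  // Here after == 8 and counter == 8.
  (void)after;
}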


@ -1,168 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
//
#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
namespace v8 {
namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return (__sync_val_compare_and_swap(ptr, old_value, new_value));
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
for (;;) {
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
return new_value;
// The exchange took place as expected.
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
__asm__ __volatile__("sync" : : : "memory"); }
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#ifdef V8_TARGET_ARCH_PPC64
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return (__sync_val_compare_and_swap(ptr, old_value, new_value));
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
for (;;) {
Atomic64 old_value = *ptr;
Atomic64 new_value = old_value + increment;
if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
return new_value;
// The exchange took place as expected.
}
// Otherwise, *ptr changed mid-loop and we need to retry.
}
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#endif
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_


@ -1,152 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
namespace v8 {
namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return (__sync_val_compare_and_swap(ptr, old_value, new_value));
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
} while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return __sync_add_and_fetch(ptr, increment);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() { __sync_synchronize(); }
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#ifdef V8_TARGET_ARCH_S390X
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return (__sync_val_compare_and_swap(ptr, old_value, new_value));
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
old_value = *ptr;
} while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return __sync_add_and_fetch(ptr, increment);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#endif
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_S390_H_


@ -1,363 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
namespace v8 {
namespace base {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
extern "C" {
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
#if defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif
typedef enum {
__tsan_memory_order_relaxed,
__tsan_memory_order_consume,
__tsan_memory_order_acquire,
__tsan_memory_order_release,
__tsan_memory_order_acq_rel,
__tsan_memory_order_seq_cst,
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
__tsan_memory_order mo);
void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
__tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
__tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
__tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
} // extern "C"
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
__tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
inline void MemoryBarrier() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
} // namespace base
} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
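Once the operations are written with compiler builtin atomics, ThreadSanitizer instruments them natively and no __tsan_* shim is needed at all. As a hedged illustration (not the replacement code), the acquire compare-and-swap above can be expressed with __atomic_compare_exchange_n:
// Illustrative sketch only, assuming the GCC/Clang __atomic builtins.
#include <stdint.h>
typedef int32_t Atomic32;  // Stand-in for the v8::base typedef.
inline Atomic32 Acquire_CompareAndSwap_Sketch(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  Atomic32 expected = old_value;
  // Strong CAS with acquire ordering on both success and failure, matching
  // the __tsan_atomic32_compare_exchange_strong call above. On failure the
  // builtin writes the observed value into |expected|, so returning it gives
  // the same "previous value" result as the TSAN variant.
  __atomic_compare_exchange_n(ptr, &expected, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return expected;
}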

View File

@ -1,116 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This module gets enough CPU information to optimize the
// atomicops module on x86.
#include <string.h> // NOLINT(build/include)
#include "src/base/atomicops.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
// Inline cpuid instruction. In PIC compilations, %ebx contains the address
// of the global offset table. To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined(__x86_64__)
#define cpuid(a, b, c, d, inp) \
asm("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
namespace v8 {
namespace base {
// Set the flags so that code runs correctly and conservatively even before
// initialization: at that point the process is almost certainly still single
// threaded, so these default values are safe.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // bug can't exist before process spawns multiple threads
#if !defined(__SSE2__)
false, // no SSE2
#endif
};
} // namespace base
} // namespace v8
namespace {
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
void AtomicOps_Internalx86CPUFeaturesInit() {
using v8::base::AtomicOps_Internalx86CPUFeatures;
uint32_t eax = 0;
uint32_t ebx = 0;
uint32_t ecx = 0;
uint32_t edx = 0;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
memcpy(vendor, &ebx, 4);
memcpy(vendor + 4, &edx, 4);
memcpy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
cpuid(eax, ebx, ecx, edx, 1);
int family = (eax >> 8) & 0xf; // family and model fields
int model = (eax >> 4) & 0xf;
if (family == 0xf) { // use extended family and model fields
family += (eax >> 20) & 0xff;
model += ((eax >> 16) & 0xf) << 4;
}
// Opteron Rev E has a bug in which on very rare occasions a locked
// instruction doesn't act as a read-acquire barrier if followed by a
// non-locked read-modify-write instruction. Rev F has this bug in
// pre-release versions, but not in versions released to customers,
// so we test only for Rev E, which is family 15, model 32..63 inclusive.
if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
family == 15 &&
32 <= model && model <= 63) {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
} else {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
}
#if !defined(__SSE2__)
// edx bit 26 is SSE2, which tells us whether we can use mfence.
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
#endif
}
class AtomicOpsx86Initializer {
public:
AtomicOpsx86Initializer() {
AtomicOps_Internalx86CPUFeaturesInit();
}
};
// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;
} // namespace
#endif // if x86
#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
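The inline cpuid macro above exists mainly to preserve %ebx/%rbx in PIC builds. For comparison only (this is not part of the change), GCC and Clang ship <cpuid.h>, whose __get_cpuid helper performs the same register preservation; a minimal sketch of the family/model decoding using it, assuming an x86 GCC/Clang toolchain:
// Sketch only: reproduce the family/model/SSE2 decoding with <cpuid.h>.
#include <cpuid.h>
#include <stdio.h>
int main() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;  // Leaf 1: features.
  int family = (eax >> 8) & 0xf;
  int model = (eax >> 4) & 0xf;
  if (family == 0xf) {  // Use the extended family/model fields, as above.
    family += (eax >> 20) & 0xff;
    model += ((eax >> 16) & 0xf) << 4;
  }
  printf("family=%d model=%d sse2=%d\n", family, model, (edx >> 26) & 1);
  return 0;
}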

View File

@ -1,278 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#include "src/base/base-export.h"
namespace v8 {
namespace base {
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
#if !defined(__SSE2__)
bool has_sse2; // Processor has SSE2.
#endif
};
V8_BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
__asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now holds the old value of *ptr
return temp + increment;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp = increment;
__asm__ __volatile__("lock; xaddl %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now holds the old value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
#if defined(__x86_64__) || defined(__SSE2__)
// The memory barrier can be simpler here because "mfence" is guaranteed to
// exist: this branch is used on x86-64, or on 32-bit builds compiled with SSE2.
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
#else
inline void MemoryBarrier() {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
__asm__ __volatile__("mfence" : : : "memory");
} else { // mfence is faster but not present on PIII
Atomic32 x = 0;
NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
}
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
*ptr = value;
__asm__ __volatile__("mfence" : : : "memory");
} else {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier on PIII
}
}
#endif
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.
// See comments in Atomic64 version of Release_Store(), below.
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
// See comments in Atomic64 version of Release_Store(), below.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
__asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value; // Now it's the previous value.
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now contains the previous value of *ptr
return temp + increment;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp = increment;
__asm__ __volatile__("lock; xaddq %0,%1"
: "+r" (temp), "+m" (*ptr)
: : "memory");
// temp now contains the previous value of *ptr
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return temp + increment;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier
// for current AMD/Intel chips as of Jan 2008.
// See also Acquire_Load(), below.
// When new chips come out, check:
// IA-32 Intel Architecture Software Developer's Manual, Volume 3:
// System Programming Guide, Chapter 7: Multiple-Processor Management,
// Section 7.2, Memory Ordering.
// Last seen at:
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
//
// x86 stores/loads fail to act as barriers for a few instructions (clflush
// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
// not generated by the compiler, and are rare. Users of these instructions
// need to know about cache behaviour in any case since all of these involve
// either flushing cache lines or non-temporal cache hints.
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
// for current AMD/Intel chips as of Jan 2008.
// See also Release_Store(), above.
ATOMICOPS_COMPILER_BARRIER();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
__asm__ __volatile__("lfence" : : : "memory");
}
return x;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
#endif // defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
} // namespace base
} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
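Each hand-written lock; xaddl / xchgl sequence above has a direct compiler-builtin counterpart, which is what makes a single portable header possible. A hedged sketch (invented names, GCC/Clang __atomic builtins assumed):
// Illustrative sketch only, not the replacement implementation.
#include <stdint.h>
typedef int32_t Atomic32;  // Stand-in for the v8::base typedef.
// Relaxed fetch-and-add: the builtin counterpart of "lock; xaddl". It returns
// the new value, matching the "temp + increment" result computed above.
inline Atomic32 NoBarrier_AtomicIncrement_Sketch(volatile Atomic32* ptr,
                                                 Atomic32 increment) {
  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}
// Relaxed exchange: the builtin counterpart of "xchgl".
inline Atomic32 NoBarrier_AtomicExchange_Sketch(volatile Atomic32* ptr,
                                                Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
With the builtins, the choice of barrier instruction is made by the compiler for the configured target, so the runtime SSE2 detection used above is no longer needed.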

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
// This file is an internal atomic implementation, use base/atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
@ -26,25 +26,23 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = InterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return InterlockedExchangeAdd(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) + increment;
return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) +
increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
@ -52,9 +50,6 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
return Barrier_AtomicIncrement(ptr, increment);
}
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
// See #undef and note at the top of this file.
@ -85,11 +80,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
@ -108,16 +98,11 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
@ -152,11 +137,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
NoBarrier_AtomicExchange(ptr, value);
// acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
@ -177,11 +157,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {

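After this change the MSVC path keeps only the relaxed store/load plus Release_Store and Acquire_Load, which is enough for the usual publish/consume pattern. A usage sketch against the public atomicops API (the ready/payload names are invented for illustration):
// Usage sketch: publish a value with Release_Store, consume it with
// Acquire_Load. Not taken from the V8 sources.
#include "src/base/atomicops.h"
namespace {
v8::base::Atomic32 ready = 0;
int payload = 0;
void Producer() {
  payload = 42;                        // Plain write...
  v8::base::Release_Store(&ready, 1);  // ...made visible by the release store.
}
bool TryConsume(int* out) {
  if (v8::base::Acquire_Load(&ready) == 0) return false;
  *out = payload;  // Safe: the acquire load pairs with the release store.
  return true;
}
}  // namespace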
View File

@ -1780,17 +1780,8 @@
'base/adapters.h',
'base/atomic-utils.h',
'base/atomicops.h',
'base/atomicops_internals_arm64_gcc.h',
'base/atomicops_internals_arm_gcc.h',
'base/atomicops_internals_atomicword_compat.h',
'base/atomicops_internals_mac.h',
'base/atomicops_internals_mips_gcc.h',
'base/atomicops_internals_mips64_gcc.h',
'base/atomicops_internals_ppc_gcc.h',
'base/atomicops_internals_s390_gcc.h',
'base/atomicops_internals_tsan.h',
'base/atomicops_internals_x86_gcc.cc',
'base/atomicops_internals_x86_gcc.h',
'base/atomicops_internals_portable.h',
'base/atomicops_internals_x86_msvc.h',
'base/base-export.h',
'base/bits.cc',

View File

@ -193,11 +193,6 @@ static void TestStore() {
NoBarrier_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
Acquire_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
Acquire_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
Release_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
Release_Store(&value, kVal2);
@ -238,11 +233,6 @@ static void TestLoad() {
CHECK_EQU(kVal1, Acquire_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, Acquire_Load(&value));
value = kVal1;
CHECK_EQU(kVal1, Release_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, Release_Load(&value));
}
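The deleted checks exercised the Acquire_Store and Release_Load variants that no longer exist; the surviving variants can still be smoke-tested on their own. A minimal standalone sketch using plain assert instead of the cctest CHECK_EQU macro (illustrative, not part of this commit):
// Standalone smoke test of the surviving store/load operations.
#include <cassert>
#include "src/base/atomicops.h"
int main() {
  using namespace v8::base;
  Atomic32 value = 0;
  NoBarrier_Store(&value, 13);
  assert(NoBarrier_Load(&value) == 13);
  Release_Store(&value, 14);
  assert(Acquire_Load(&value) == 14);
  return 0;
}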