Rename "NoBarrier" memory operations to "Relaxed".

This is consistent with the C++ memory model and avoids confusion with the
GC write barrier.

BUG=

Review-Url: https://codereview.chromium.org/2912773002
Cr-Commit-Position: refs/heads/master@{#45584}
ulan 2017-05-30 00:44:37 -07:00 committed by Commit Bot
parent cc2fb993a1
commit 23cc6be3fc
38 changed files with 240 additions and 254 deletions
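
For context on the rename: the old NoBarrier_* helpers already had relaxed
semantics, so this change is purely a renaming that lines the names up with the
C++11 memory model. Below is a minimal standalone sketch (plain std::atomic,
not part of this commit) of what "relaxed" means for the renamed operations;
the counter and function names are illustrative only.

// Standalone illustration, not V8 code: the renamed Relaxed_* operations
// correspond to C++11 relaxed atomics, i.e. atomic accesses that imply no
// ordering fences and are unrelated to the GC write barrier.
#include <atomic>
#include <cstdint>

std::atomic<int32_t> g_counter{0};

int32_t RelaxedIncrement(int32_t by) {
  // Same idea as base::Relaxed_AtomicIncrement: an atomic read-modify-write
  // returning the new value, with relaxed ordering.
  return g_counter.fetch_add(by, std::memory_order_relaxed) + by;
}

int32_t RelaxedLoad() {
  // Same idea as base::Relaxed_Load: atomic, but with no acquire semantics.
  return g_counter.load(std::memory_order_relaxed);
}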


@ -68,18 +68,18 @@ class NoBarrierAtomicValue {
}
V8_INLINE bool TrySetValue(T old_value, T new_value) {
return base::NoBarrier_CompareAndSwap(
return base::Relaxed_CompareAndSwap(
&value_, cast_helper<T>::to_storage_type(old_value),
cast_helper<T>::to_storage_type(new_value)) ==
cast_helper<T>::to_storage_type(old_value);
}
V8_INLINE T Value() const {
return cast_helper<T>::to_return_type(base::NoBarrier_Load(&value_));
return cast_helper<T>::to_return_type(base::Relaxed_Load(&value_));
}
V8_INLINE void SetValue(T new_value) {
base::NoBarrier_Store(&value_, cast_helper<T>::to_storage_type(new_value));
base::Relaxed_Store(&value_, cast_helper<T>::to_storage_type(new_value));
}
private:


@ -14,10 +14,10 @@
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
// NoBarrier_Store()
// NoBarrier_Load()
// You should use one of the Load or Store routines. The Relaxed versions
// are provided when no fences are needed:
// Relaxed_Store()
// Relaxed_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//
@ -74,17 +74,16 @@ typedef intptr_t AtomicWord;
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
Atomic32 new_value);
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment);
@ -106,21 +105,20 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 new_value);
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
Atomic64 new_value);
Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
@ -129,9 +127,9 @@ Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
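
The comment block above describes when the relaxed Load/Store routines are
appropriate. A minimal usage sketch of the renamed API as declared in this
header (the flag variable and helper names are illustrative, not from the
commit):

// Sketch only: how call sites read after the rename.
#include "src/base/atomicops.h"

namespace {

v8::base::Atomic32 g_flag = 0;

void SetFlag() {
  // Plain atomic store; no memory fence is implied.
  v8::base::Relaxed_Store(&g_flag, 1);
}

bool TryClaimFlag() {
  // The relaxed CAS returns the old value of *ptr.
  return v8::base::Relaxed_CompareAndSwap(&g_flag, 1, 2) == 1;
}

v8::base::Atomic32 PeekFlag() { return v8::base::Relaxed_Load(&g_flag); }

}  // namespace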


@ -23,23 +23,23 @@
namespace v8 {
namespace base {
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return NoBarrier_CompareAndSwap(
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
inline AtomicWord Relaxed_CompareAndSwap(volatile AtomicWord* ptr,
AtomicWord old_value,
AtomicWord new_value) {
return Relaxed_CompareAndSwap(reinterpret_cast<volatile Atomic32*>(ptr),
old_value, new_value);
}
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return NoBarrier_AtomicExchange(
reinterpret_cast<volatile Atomic32*>(ptr), new_value);
inline AtomicWord Relaxed_AtomicExchange(volatile AtomicWord* ptr,
AtomicWord new_value) {
return Relaxed_AtomicExchange(reinterpret_cast<volatile Atomic32*>(ptr),
new_value);
}
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return NoBarrier_AtomicIncrement(
reinterpret_cast<volatile Atomic32*>(ptr), increment);
inline AtomicWord Relaxed_AtomicIncrement(volatile AtomicWord* ptr,
AtomicWord increment) {
return Relaxed_AtomicIncrement(reinterpret_cast<volatile Atomic32*>(ptr),
increment);
}
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
@ -62,9 +62,8 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) {
Relaxed_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
@ -72,9 +71,8 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const Atomic32*>(ptr));
inline AtomicWord Relaxed_Load(volatile const AtomicWord* ptr) {
return Relaxed_Load(reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {


@ -49,21 +49,20 @@ inline void MemoryBarrier() {
#endif
}
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
@ -86,11 +85,11 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
@ -98,11 +97,11 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
@ -112,21 +111,20 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
#if defined(V8_HOST_ARCH_64_BIT)
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
@ -149,7 +147,7 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
@ -157,7 +155,7 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}


@ -22,17 +22,16 @@
namespace v8 {
namespace base {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value, Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
@ -45,8 +44,8 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
@ -63,20 +62,20 @@ inline void MemoryBarrier() {
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
@ -85,13 +84,9 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
// See comments in Atomic64 version of Release_Store() below.
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
return *ptr;
}
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { return *ptr; }
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
@ -104,17 +99,16 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value, Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
PVOID result = InterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
@ -128,12 +122,12 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
static_cast<LONGLONG>(increment)) + increment;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
@ -148,9 +142,7 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { return *ptr; }
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
@ -160,13 +152,13 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}


@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
base::Relaxed_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
input_queue_ = NewArray<CompilationJob*>(input_queue_capacity_);
}


@ -2438,7 +2438,7 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
while (!out_queue_.Dequeue(&result)) {
// If the worker is no longer running, and there are no messages in the
// queue, don't expect any more messages from it.
if (!base::NoBarrier_Load(&running_)) break;
if (!base::Relaxed_Load(&running_)) break;
out_semaphore_.Wait();
}
return result;
@ -2446,7 +2446,7 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
void Worker::Terminate() {
base::NoBarrier_Store(&running_, false);
base::Relaxed_Store(&running_, false);
// Post NULL to wake the Worker thread message loop, and tell it to stop
// running.
PostMessage(NULL);


@ -402,8 +402,8 @@ void Debug::ThreadInit() {
thread_local_.async_task_count_ = 0;
clear_suspended_generator();
thread_local_.restart_fp_ = nullptr;
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
base::Relaxed_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
UpdateHookOnFunctionCall();
}
@ -2225,8 +2225,8 @@ DebugScope::DebugScope(Debug* debug)
no_termination_exceptons_(debug_->isolate_,
StackGuard::TERMINATE_EXECUTION) {
// Link recursive debugger entry.
base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(this));
base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(this));
// Store the previous break id, frame id and return value.
break_id_ = debug_->break_id();
@ -2250,8 +2250,8 @@ DebugScope::DebugScope(Debug* debug)
DebugScope::~DebugScope() {
// Leaving this debugger entry.
base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(prev_));
base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(prev_));
// Restore to the previous break state.
debug_->thread_local_.break_frame_id_ = break_frame_id_;


@ -379,7 +379,7 @@ class Debug {
// Flags and states.
DebugScope* debugger_entry() {
return reinterpret_cast<DebugScope*>(
base::NoBarrier_Load(&thread_local_.current_debug_scope_));
base::Relaxed_Load(&thread_local_.current_debug_scope_));
}
inline Handle<Context> debug_context() { return debug_context_; }
@ -391,7 +391,7 @@ class Debug {
inline bool is_active() const { return is_active_; }
inline bool is_loaded() const { return !debug_context_.is_null(); }
inline bool in_debug_scope() const {
return !!base::NoBarrier_Load(&thread_local_.current_debug_scope_);
return !!base::Relaxed_Load(&thread_local_.current_debug_scope_);
}
void set_break_points_active(bool v) { break_points_active_ = v; }
bool break_points_active() const { return break_points_active_; }


@ -199,18 +199,18 @@ class V8_EXPORT_PRIVATE StackGuard final {
base::AtomicWord climit_;
uintptr_t jslimit() {
return bit_cast<uintptr_t>(base::NoBarrier_Load(&jslimit_));
return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
}
void set_jslimit(uintptr_t limit) {
return base::NoBarrier_Store(&jslimit_,
static_cast<base::AtomicWord>(limit));
return base::Relaxed_Store(&jslimit_,
static_cast<base::AtomicWord>(limit));
}
uintptr_t climit() {
return bit_cast<uintptr_t>(base::NoBarrier_Load(&climit_));
return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
}
void set_climit(uintptr_t limit) {
return base::NoBarrier_Store(&climit_,
static_cast<base::AtomicWord>(limit));
return base::Relaxed_Store(&climit_,
static_cast<base::AtomicWord>(limit));
}
PostponeInterruptsScope* postpone_interrupts_;


@ -59,7 +59,7 @@ class ConcurrentMarkingVisitor final
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
Object* object = reinterpret_cast<Object*>(
base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
if (!object->IsHeapObject()) continue;
MarkObject(HeapObject::cast(object));
}
@ -183,7 +183,7 @@ class ConcurrentMarkingVisitor final
Object** end) override {
for (Object** p = start; p < end; p++) {
Object* object = reinterpret_cast<Object*>(
base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
slot_snapshot_->add(p, object);
}
}


@ -3128,7 +3128,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->nobarrier_set_size(size);
FreeSpace::cast(filler)->relaxed_write_size(size);
}
if (mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);


@ -1687,8 +1687,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
reinterpret_cast<base::AtomicWord>(dst_addr));
base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
reinterpret_cast<base::AtomicWord>(dst_addr));
}
EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
@ -3414,7 +3414,7 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
static inline SlotCallbackResult UpdateSlot(Object** slot) {
Object* obj = reinterpret_cast<Object*>(
base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
base::Relaxed_Load(reinterpret_cast<base::AtomicWord*>(slot)));
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
@ -3425,10 +3425,9 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
Page::FromAddress(heap_obj->address())
->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
HeapObject* target = map_word.ToForwardingAddress();
base::NoBarrier_CompareAndSwap(
reinterpret_cast<base::AtomicWord*>(slot),
reinterpret_cast<base::AtomicWord>(obj),
reinterpret_cast<base::AtomicWord>(target));
base::Relaxed_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot),
reinterpret_cast<base::AtomicWord>(obj),
reinterpret_cast<base::AtomicWord>(target));
DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
}


@ -71,7 +71,7 @@ inline bool MarkBit::Set<MarkBit::ATOMIC>() {
base::Atomic32 old_value;
base::Atomic32 new_value;
do {
old_value = base::NoBarrier_Load(cell_);
old_value = base::Relaxed_Load(cell_);
if (old_value & mask_) return false;
new_value = old_value | mask_;
} while (base::Release_CompareAndSwap(cell_, old_value, new_value) !=
@ -81,7 +81,7 @@ inline bool MarkBit::Set<MarkBit::ATOMIC>() {
template <>
inline bool MarkBit::Get<MarkBit::NON_ATOMIC>() {
return (base::NoBarrier_Load(cell_) & mask_) != 0;
return (base::Relaxed_Load(cell_) & mask_) != 0;
}
template <>
@ -101,7 +101,7 @@ inline bool MarkBit::Clear<MarkBit::ATOMIC>() {
base::Atomic32 old_value;
base::Atomic32 new_value;
do {
old_value = base::NoBarrier_Load(cell_);
old_value = base::Relaxed_Load(cell_);
if (!(old_value & mask_)) return false;
new_value = old_value & ~mask_;
} while (base::Release_CompareAndSwap(cell_, old_value, new_value) !=


@ -100,7 +100,7 @@ class VisitorDispatchTable {
// every element of callbacks_ array will remain correct
// pointer (memcpy might be implemented as a byte copying loop).
for (int i = 0; i < kVisitorIdCount; i++) {
base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
base::Relaxed_Store(&callbacks_[i], other->callbacks_[i]);
}
}


@ -2841,7 +2841,7 @@ size_t FreeListCategory::SumFreeList() {
FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
sum += cur->nobarrier_size();
sum += cur->relaxed_read_size();
cur = cur->next();
}
return sum;


@ -17,21 +17,21 @@ base::LazyInstance<ICStats>::type ICStats::instance_ =
LAZY_INSTANCE_INITIALIZER;
ICStats::ICStats() : ic_infos_(MAX_IC_INFO), pos_(0) {
base::NoBarrier_Store(&enabled_, 0);
base::Relaxed_Store(&enabled_, 0);
}
void ICStats::Begin() {
if (V8_LIKELY(!FLAG_ic_stats)) return;
base::NoBarrier_Store(&enabled_, 1);
base::Relaxed_Store(&enabled_, 1);
}
void ICStats::End() {
if (base::NoBarrier_Load(&enabled_) != 1) return;
if (base::Relaxed_Load(&enabled_) != 1) return;
++pos_;
if (pos_ == MAX_IC_INFO) {
Dump();
}
base::NoBarrier_Store(&enabled_, 0);
base::Relaxed_Store(&enabled_, 0);
}
void ICStats::Reset() {


@ -364,7 +364,7 @@ Response V8ProfilerAgentImpl::getBestEffortCoverage(
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
v8::base::NoBarrier_AtomicIncrement(&s_lastProfileId, 1));
v8::base::Relaxed_AtomicIncrement(&s_lastProfileId, 1));
}
void V8ProfilerAgentImpl::startProfiling(const String16& title) {


@ -62,7 +62,7 @@ namespace internal {
base::Atomic32 ThreadId::highest_thread_id_ = 0;
int ThreadId::AllocateThreadId() {
int new_id = base::NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
return new_id;
}
@ -189,7 +189,7 @@ void Isolate::InitializeOncePerProcess() {
CHECK(thread_data_table_ == NULL);
isolate_key_ = base::Thread::CreateThreadLocalKey();
#if DEBUG
base::NoBarrier_Store(&isolate_key_created_, 1);
base::Relaxed_Store(&isolate_key_created_, 1);
#endif
thread_id_key_ = base::Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
@ -2344,7 +2344,7 @@ Isolate::Isolate(bool enable_serializer)
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
CHECK(thread_data_table_);
}
id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1);
id_ = base::Relaxed_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@ -2390,7 +2390,7 @@ void Isolate::TearDown() {
// direct pointer. We don't use Enter/Exit here to avoid
// initializing the thread data.
PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
DCHECK(base::Relaxed_Load(&isolate_key_created_) == 1);
Isolate* saved_isolate =
reinterpret_cast<Isolate*>(base::Thread::GetThreadLocal(isolate_key_));
SetIsolateThreadLocals(this, NULL);


@ -222,10 +222,10 @@ class Interpreter;
class ThreadId {
public:
// Creates an invalid ThreadId.
ThreadId() { base::NoBarrier_Store(&id_, kInvalidId); }
ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }
ThreadId& operator=(const ThreadId& other) {
base::NoBarrier_Store(&id_, base::NoBarrier_Load(&other.id_));
base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
return *this;
}
@ -237,17 +237,17 @@ class ThreadId {
// Compares ThreadIds for equality.
INLINE(bool Equals(const ThreadId& other) const) {
return base::NoBarrier_Load(&id_) == base::NoBarrier_Load(&other.id_);
return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
}
// Checks whether this ThreadId refers to any thread.
INLINE(bool IsValid() const) {
return base::NoBarrier_Load(&id_) != kInvalidId;
return base::Relaxed_Load(&id_) != kInvalidId;
}
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::GetCurrentThreadId).
int ToInteger() const { return static_cast<int>(base::NoBarrier_Load(&id_)); }
int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::TerminateExecution).
@ -256,7 +256,7 @@ class ThreadId {
private:
static const int kInvalidId = -1;
explicit ThreadId(int id) { base::NoBarrier_Store(&id_, id); }
explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }
static int AllocateThreadId();
@ -523,7 +523,7 @@ class Isolate {
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
DCHECK(base::Relaxed_Load(&isolate_key_created_) == 1);
Isolate* isolate = reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
DCHECK(isolate != NULL);


@ -144,7 +144,7 @@ void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
}
void TracingController::UpdateCategoryGroupEnabledFlags() {
size_t category_index = base::NoBarrier_Load(&g_category_index);
size_t category_index = base::Relaxed_Load(&g_category_index);
for (size_t i = 0; i < category_index; i++) UpdateCategoryGroupEnabledFlag(i);
}


@ -602,7 +602,7 @@ void Sampler::Stop() {
void Sampler::IncreaseProfilingDepth() {
base::NoBarrier_AtomicIncrement(&profiling_, 1);
base::Relaxed_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
SignalHandler::IncreaseSamplerCount();
#endif
@ -613,7 +613,7 @@ void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
SignalHandler::DecreaseSamplerCount();
#endif
base::NoBarrier_AtomicIncrement(&profiling_, -1);
base::Relaxed_AtomicIncrement(&profiling_, -1);
}


@ -46,24 +46,24 @@ class Sampler {
// Whether the sampling thread should use this Sampler for CPU profiling?
bool IsProfiling() const {
return base::NoBarrier_Load(&profiling_) > 0 &&
!base::NoBarrier_Load(&has_processing_thread_);
return base::Relaxed_Load(&profiling_) > 0 &&
!base::Relaxed_Load(&has_processing_thread_);
}
void IncreaseProfilingDepth();
void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return base::NoBarrier_Load(&active_) != 0; }
bool IsActive() const { return base::Relaxed_Load(&active_) != 0; }
// CpuProfiler collects samples by calling DoSample directly
// without calling Start. To keep it working, we register the sampler
// with the CpuProfiler.
bool IsRegistered() const { return base::NoBarrier_Load(&registered_) != 0; }
bool IsRegistered() const { return base::Relaxed_Load(&registered_) != 0; }
void DoSample();
void SetHasProcessingThread(bool value) {
base::NoBarrier_Store(&has_processing_thread_, value);
base::Relaxed_Store(&has_processing_thread_, value);
}
// Used in tests to make sure that stack sampling is performed.
@ -85,8 +85,8 @@ class Sampler {
unsigned external_sample_count_;
private:
void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
void SetActive(bool value) { base::Relaxed_Store(&active_, value); }
void SetRegistered(bool value) { base::Relaxed_Store(&registered_, value); }
Isolate* isolate_;
base::Atomic32 profiling_;


@ -559,7 +559,7 @@ class Profiler: public base::Thread {
if (paused_)
return;
if (Succ(head_) == static_cast<int>(base::NoBarrier_Load(&tail_))) {
if (Succ(head_) == static_cast<int>(base::Relaxed_Load(&tail_))) {
overflow_ = true;
} else {
buffer_[head_] = *sample;
@ -578,10 +578,10 @@ class Profiler: public base::Thread {
// Waits for a signal and removes profiling data.
bool Remove(v8::TickSample* sample) {
buffer_semaphore_.Wait(); // Wait for an element.
*sample = buffer_[base::NoBarrier_Load(&tail_)];
*sample = buffer_[base::Relaxed_Load(&tail_)];
bool result = overflow_;
base::NoBarrier_Store(&tail_, static_cast<base::Atomic32>(
Succ(base::NoBarrier_Load(&tail_))));
base::Relaxed_Store(
&tail_, static_cast<base::Atomic32>(Succ(base::Relaxed_Load(&tail_))));
overflow_ = false;
return result;
}
@ -667,8 +667,8 @@ Profiler::Profiler(Isolate* isolate)
buffer_semaphore_(0),
engaged_(false),
paused_(false) {
base::NoBarrier_Store(&tail_, 0);
base::NoBarrier_Store(&running_, 0);
base::Relaxed_Store(&tail_, 0);
base::Relaxed_Store(&running_, 0);
}
@ -685,7 +685,7 @@ void Profiler::Engage() {
}
// Start thread processing the profiler buffer.
base::NoBarrier_Store(&running_, 1);
base::Relaxed_Store(&running_, 1);
Start();
// Register to get ticks.
@ -705,7 +705,7 @@ void Profiler::Disengage() {
// Terminate the worker thread by setting running_ to false,
// inserting a fake element in the queue and then wait for
// the thread to terminate.
base::NoBarrier_Store(&running_, 0);
base::Relaxed_Store(&running_, 0);
v8::TickSample sample;
// Reset 'paused_' flag, otherwise semaphore may not be signalled.
resume();
@ -719,7 +719,7 @@ void Profiler::Disengage() {
void Profiler::Run() {
v8::TickSample sample;
bool overflow = Remove(&sample);
while (base::NoBarrier_Load(&running_)) {
while (base::Relaxed_Load(&running_)) {
LOG(isolate_, TickEvent(&sample, overflow));
overflow = Remove(&sample);
}


@ -1328,13 +1328,13 @@ HeapObject** HeapObject::map_slot() {
MapWord HeapObject::map_word() const {
return MapWord(
reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
reinterpret_cast<uintptr_t>(RELAXED_READ_FIELD(this, kMapOffset)));
}
void HeapObject::set_map_word(MapWord map_word) {
NOBARRIER_WRITE_FIELD(
this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
RELAXED_WRITE_FIELD(this, kMapOffset,
reinterpret_cast<Object*>(map_word.value_));
}
@ -2184,7 +2184,7 @@ void Object::VerifyApiCallResultType() {
Object* FixedArray::get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length());
return NOBARRIER_READ_FIELD(this, kHeaderSize + index * kPointerSize);
return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
@ -2213,7 +2213,7 @@ void FixedArray::set(int index, Smi* value) {
DCHECK(index >= 0 && index < this->length());
DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
NOBARRIER_WRITE_FIELD(this, offset, value);
RELAXED_WRITE_FIELD(this, offset, value);
}
@ -2223,7 +2223,7 @@ void FixedArray::set(int index, Object* value) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
NOBARRIER_WRITE_FIELD(this, offset, value);
RELAXED_WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
@ -2448,7 +2448,7 @@ void FixedArray::set(int index,
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
NOBARRIER_WRITE_FIELD(this, offset, value);
RELAXED_WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
@ -2460,7 +2460,7 @@ void FixedArray::NoWriteBarrierSet(FixedArray* array,
DCHECK_GE(index, 0);
DCHECK_LT(index, array->length());
DCHECK(!array->GetHeap()->InNewSpace(value));
NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
@ -3183,7 +3183,7 @@ SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
@ -3195,7 +3195,7 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
return reinterpret_cast<FreeSpace*>(
Memory::Address_at(address() + kNextOffset));
}
@ -3204,8 +3204,8 @@ FreeSpace* FreeSpace::next() {
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
base::NoBarrier_Store(
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
base::Relaxed_Store(
reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
reinterpret_cast<base::AtomicWord>(next));
}
@ -4086,8 +4086,7 @@ void Map::set_visitor_id(int id) {
int Map::instance_size() {
return NOBARRIER_READ_BYTE_FIELD(
this, kInstanceSizeOffset) << kPointerSizeLog2;
return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
}
@ -4167,7 +4166,7 @@ int HeapObject::SizeFromMap(Map* map) {
return reinterpret_cast<BytecodeArray*>(this)->BytecodeArraySize();
}
if (instance_type == FREE_SPACE_TYPE) {
return reinterpret_cast<FreeSpace*>(this)->nobarrier_size();
return reinterpret_cast<FreeSpace*>(this)->relaxed_read_size();
}
if (instance_type == STRING_TYPE ||
instance_type == INTERNALIZED_STRING_TYPE) {
@ -4197,8 +4196,7 @@ void Map::set_instance_size(int value) {
DCHECK_EQ(0, value & (kPointerSize - 1));
value >>= kPointerSizeLog2;
DCHECK(0 <= value && value < 256);
NOBARRIER_WRITE_BYTE_FIELD(
this, kInstanceSizeOffset, static_cast<byte>(value));
RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
}
@ -5936,7 +5934,7 @@ void Foreign::set_foreign_address(Address value) {
template <class Derived>
void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, Object* value) {
int offset = GetDataEntryOffset(entry);
NOBARRIER_WRITE_FIELD(this, offset, value);
RELAXED_WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}


@ -3392,8 +3392,8 @@ class FreeSpace: public HeapObject {
inline int size() const;
inline void set_size(int value);
inline int nobarrier_size() const;
inline void nobarrier_set_size(int value);
inline int relaxed_read_size() const;
inline void relaxed_write_size(int value);
inline int Size();


@ -14,7 +14,7 @@
#undef SMI_ACCESSORS_CHECKED
#undef SMI_ACCESSORS
#undef SYNCHRONIZED_SMI_ACCESSORS
#undef NOBARRIER_SMI_ACCESSORS
#undef RELAXED_SMI_ACCESSORS
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
#undef TYPE_CHECKER
@ -22,10 +22,10 @@
#undef FIELD_ADDR_CONST
#undef READ_FIELD
#undef ACQUIRE_READ_FIELD
#undef NOBARRIER_READ_FIELD
#undef RELAXED_READ_FIELD
#undef WRITE_FIELD
#undef RELEASE_WRITE_FIELD
#undef NOBARRIER_WRITE_FIELD
#undef RELAXED_WRITE_FIELD
#undef WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
#undef READ_DOUBLE_FIELD
@ -53,9 +53,9 @@
#undef READ_INT64_FIELD
#undef WRITE_INT64_FIELD
#undef READ_BYTE_FIELD
#undef NOBARRIER_READ_BYTE_FIELD
#undef RELAXED_READ_BYTE_FIELD
#undef WRITE_BYTE_FIELD
#undef NOBARRIER_WRITE_BYTE_FIELD
#undef RELAXED_WRITE_BYTE_FIELD
#undef DECLARE_VERIFIER
#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
#undef DEFINE_DEOPT_ENTRY_ACCESSORS


@ -8,6 +8,10 @@
// Note 2: This file is deliberately missing the include guards (the undeffing
// approach wouldn't work otherwise).
// The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering semantics.
#define DECL_BOOLEAN_ACCESSORS(name) \
inline bool name() const; \
inline void set_##name(bool value);
@ -80,13 +84,13 @@
RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
int holder::nobarrier_##name() const { \
Object* value = NOBARRIER_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::nobarrier_set_##name(int value) { \
NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
int holder::relaxed_read_##name() const { \
Object* value = RELAXED_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::relaxed_write_##name(int value) { \
RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define BOOL_GETTER(holder, field, name, offset) \
@ -116,13 +120,13 @@
reinterpret_cast<Object*>(base::Acquire_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
#define NOBARRIER_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::NoBarrier_Load( \
#define RELAXED_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Relaxed_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
#ifdef V8_CONCURRENT_MARKING
#define WRITE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
base::Relaxed_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#else
@ -135,8 +139,8 @@
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define NOBARRIER_WRITE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
#define RELAXED_WRITE_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
@ -229,17 +233,16 @@
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::NoBarrier_Load( \
#define RELAXED_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::Relaxed_Load( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
#define WRITE_BYTE_FIELD(p, offset, value) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
#define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \
base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
#ifdef VERIFY_HEAP
#define DECLARE_VERIFIER(Name) void Name##Verify();
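
The comment added at the top of this file explains the RELAXED_, ACQUIRE_, and
RELEASE_ prefix convention. As a standalone sketch (plain std::atomic, not V8
code, and not part of this commit) of the pattern the RELAXED_ field macros
implement, assuming a pointer-aligned field offset:

// Illustration only: treat a field slot as an atomic word and access it
// with relaxed ordering, so concurrent readers never observe a torn value.
#include <atomic>
#include <cstdint>

inline intptr_t RelaxedReadField(const void* object, int offset) {
  auto* slot = reinterpret_cast<const std::atomic<intptr_t>*>(
      static_cast<const char*>(object) + offset);
  return slot->load(std::memory_order_relaxed);
}

inline void RelaxedWriteField(void* object, int offset, intptr_t value) {
  auto* slot = reinterpret_cast<std::atomic<intptr_t>*>(
      static_cast<char*>(object) + offset);
  slot->store(value, std::memory_order_relaxed);
}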


@ -92,7 +92,7 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
void ProfilerEventsProcessor::StopSynchronously() {
if (!base::NoBarrier_AtomicExchange(&running_, 0)) return;
if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
Join();
}
@ -143,7 +143,7 @@ ProfilerEventsProcessor::SampleProcessingResult
void ProfilerEventsProcessor::Run() {
while (!!base::NoBarrier_Load(&running_)) {
while (!!base::Relaxed_Load(&running_)) {
base::TimeTicks nextSampleTime =
base::TimeTicks::HighResolutionNow() + period_;
base::TimeTicks now;


@ -138,7 +138,7 @@ class ProfilerEventsProcessor : public base::Thread {
// Thread control.
virtual void Run();
void StopSynchronously();
INLINE(bool running()) { return !!base::NoBarrier_Load(&running_); }
INLINE(bool running()) { return !!base::Relaxed_Load(&running_); }
void Enqueue(const CodeEventsContainer& event);
// Puts current stack into tick sample events buffer.


@ -66,7 +66,7 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
template<typename Record>
bool UnboundQueue<Record>::IsEmpty() const {
return base::NoBarrier_Load(&divider_) == base::NoBarrier_Load(&last_);
return base::Relaxed_Load(&divider_) == base::Relaxed_Load(&last_);
}


@ -107,9 +107,9 @@ enum CategoryGroupEnabledFlags {
// Defines atomic operations used internally by the tracing system.
#define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord
#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var))
#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::Relaxed_Load(&(var))
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
v8::base::NoBarrier_Store(&(var), (value))
v8::base::Relaxed_Store(&(var), (value))
////////////////////////////////////////////////////////////////////////////////


@ -32,7 +32,7 @@ void Locker::Initialize(v8::Isolate* isolate) {
top_level_ = true;
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
// Record that the Locker has been used at least once.
base::NoBarrier_Store(&g_locker_was_ever_used_, 1);
base::Relaxed_Store(&g_locker_was_ever_used_, 1);
// Get the big lock if necessary.
if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
isolate_->thread_manager()->Lock();
@ -60,7 +60,7 @@ bool Locker::IsLocked(v8::Isolate* isolate) {
bool Locker::IsActive() {
return !!base::NoBarrier_Load(&g_locker_was_ever_used_);
return !!base::Relaxed_Load(&g_locker_was_ever_used_);
}


@ -85,10 +85,10 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = malloc(bytes);
if (memory) {
base::AtomicWord current =
base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
while (current > max) {
max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
}
}
return reinterpret_cast<Segment*>(memory);
@ -105,22 +105,22 @@ void AccountingAllocator::ReturnSegment(Segment* segment) {
}
void AccountingAllocator::FreeSegment(Segment* memory) {
base::NoBarrier_AtomicIncrement(
&current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
base::Relaxed_AtomicIncrement(&current_memory_usage_,
-static_cast<base::AtomicWord>(memory->size()));
memory->ZapHeader();
free(memory);
}
size_t AccountingAllocator::GetCurrentMemoryUsage() const {
return base::NoBarrier_Load(&current_memory_usage_);
return base::Relaxed_Load(&current_memory_usage_);
}
size_t AccountingAllocator::GetMaxMemoryUsage() const {
return base::NoBarrier_Load(&max_memory_usage_);
return base::Relaxed_Load(&max_memory_usage_);
}
size_t AccountingAllocator::GetCurrentPoolSize() const {
return base::NoBarrier_Load(&current_pool_size_);
return base::Relaxed_Load(&current_pool_size_);
}
Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
@ -145,7 +145,7 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
segment->set_next(nullptr);
unused_segments_sizes_[power]--;
base::NoBarrier_AtomicIncrement(
base::Relaxed_AtomicIncrement(
&current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
}
}
@ -179,7 +179,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
segment->set_next(unused_segments_heads_[power]);
unused_segments_heads_[power] = segment;
base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
base::Relaxed_AtomicIncrement(&current_pool_size_, size);
unused_segments_sizes_[power]++;
}


@ -134,7 +134,7 @@ v8::Local<v8::Object> CcTest::global() {
}
void CcTest::InitializeVM() {
CHECK(!v8::base::NoBarrier_Load(&isolate_used_));
CHECK(!v8::base::Relaxed_Load(&isolate_used_));
CHECK(!initialize_called_);
initialize_called_ = true;
v8::HandleScope handle_scope(CcTest::isolate());


@ -110,7 +110,7 @@ class CcTest {
static v8::Isolate* isolate() {
CHECK(isolate_ != NULL);
v8::base::NoBarrier_Store(&isolate_used_, 1);
v8::base::Relaxed_Store(&isolate_used_, 1);
return isolate_;
}


@ -15478,10 +15478,10 @@ class RegExpInterruptionThread : public v8::base::Thread {
: Thread(Options("TimeoutThread")), isolate_(isolate) {}
virtual void Run() {
for (v8::base::NoBarrier_Store(&regexp_interruption_data.loop_count, 0);
v8::base::NoBarrier_Load(&regexp_interruption_data.loop_count) < 7;
v8::base::NoBarrier_AtomicIncrement(
&regexp_interruption_data.loop_count, 1)) {
for (v8::base::Relaxed_Store(&regexp_interruption_data.loop_count, 0);
v8::base::Relaxed_Load(&regexp_interruption_data.loop_count) < 7;
v8::base::Relaxed_AtomicIncrement(&regexp_interruption_data.loop_count,
1)) {
// Wait a bit before requesting GC.
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(50));
reinterpret_cast<i::Isolate*>(isolate_)->stack_guard()->RequestGC();
@ -15498,7 +15498,7 @@ class RegExpInterruptionThread : public v8::base::Thread {
void RunBeforeGC(v8::Isolate* isolate, v8::GCType type,
v8::GCCallbackFlags flags) {
if (v8::base::NoBarrier_Load(&regexp_interruption_data.loop_count) != 2) {
if (v8::base::Relaxed_Load(&regexp_interruption_data.loop_count) != 2) {
return;
}
v8::HandleScope scope(isolate);


@ -44,9 +44,9 @@ template <class AtomicType>
static void TestAtomicIncrement() {
// For now, we just test the single-threaded execution.
// Use a guard value to make sure that NoBarrier_AtomicIncrement doesn't
// Use a guard value to make sure that Relaxed_AtomicIncrement doesn't
// go outside the expected address bounds. This is to test that the
// 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
// 32-bit Relaxed_AtomicIncrement doesn't do the wrong thing on 64-bit
// machines.
struct {
AtomicType prev_word;
@ -62,47 +62,47 @@ static void TestAtomicIncrement() {
s.count = 0;
s.next_word = next_word_value;
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 1), 1);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, 1), 1);
CHECK_EQU(s.count, 1);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 2), 3);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, 2), 3);
CHECK_EQU(s.count, 3);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 3), 6);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, 3), 6);
CHECK_EQU(s.count, 6);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -3), 3);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, -3), 3);
CHECK_EQU(s.count, 3);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -2), 1);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, -2), 1);
CHECK_EQU(s.count, 1);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), 0);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, -1), 0);
CHECK_EQU(s.count, 0);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), -1);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, -1), -1);
CHECK_EQU(s.count, -1);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -4), -5);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, -4), -5);
CHECK_EQU(s.count, -5);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 5), 0);
CHECK_EQU(Relaxed_AtomicIncrement(&s.count, 5), 0);
CHECK_EQU(s.count, 0);
CHECK_EQU(s.prev_word, prev_word_value);
CHECK_EQU(s.next_word, next_word_value);
@ -112,7 +112,7 @@ static void TestAtomicIncrement() {
template <class AtomicType>
static void TestCompareAndSwap() {
AtomicType value = 0;
AtomicType prev = NoBarrier_CompareAndSwap(&value, 0, 1);
AtomicType prev = Relaxed_CompareAndSwap(&value, 0, 1);
CHECK_EQU(1, value);
CHECK_EQU(0, prev);
@ -121,12 +121,12 @@ static void TestCompareAndSwap() {
const AtomicType k_test_val =
(static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
value = k_test_val;
prev = NoBarrier_CompareAndSwap(&value, 0, 5);
prev = Relaxed_CompareAndSwap(&value, 0, 5);
CHECK_EQU(k_test_val, value);
CHECK_EQU(k_test_val, prev);
value = k_test_val;
prev = NoBarrier_CompareAndSwap(&value, k_test_val, 5);
prev = Relaxed_CompareAndSwap(&value, k_test_val, 5);
CHECK_EQU(5, value);
CHECK_EQU(k_test_val, prev);
}
@ -135,7 +135,7 @@ static void TestCompareAndSwap() {
template <class AtomicType>
static void TestAtomicExchange() {
AtomicType value = 0;
AtomicType new_value = NoBarrier_AtomicExchange(&value, 1);
AtomicType new_value = Relaxed_AtomicExchange(&value, 1);
CHECK_EQU(1, value);
CHECK_EQU(0, new_value);
@ -144,12 +144,12 @@ static void TestAtomicExchange() {
const AtomicType k_test_val =
(static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
value = k_test_val;
new_value = NoBarrier_AtomicExchange(&value, k_test_val);
new_value = Relaxed_AtomicExchange(&value, k_test_val);
CHECK_EQU(k_test_val, value);
CHECK_EQU(k_test_val, new_value);
value = k_test_val;
new_value = NoBarrier_AtomicExchange(&value, 5);
new_value = Relaxed_AtomicExchange(&value, 5);
CHECK_EQU(5, value);
CHECK_EQU(k_test_val, new_value);
}
@ -161,11 +161,11 @@ static void TestAtomicIncrementBounds() {
AtomicType test_val = static_cast<AtomicType>(1)
<< (NUM_BITS(AtomicType) / 2);
AtomicType value = test_val - 1;
AtomicType new_value = NoBarrier_AtomicIncrement(&value, 1);
AtomicType new_value = Relaxed_AtomicIncrement(&value, 1);
CHECK_EQU(test_val, value);
CHECK_EQU(value, new_value);
NoBarrier_AtomicIncrement(&value, -1);
Relaxed_AtomicIncrement(&value, -1);
CHECK_EQU(test_val - 1, value);
}
@ -188,9 +188,9 @@ static void TestStore() {
AtomicType value;
NoBarrier_Store(&value, kVal1);
Relaxed_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
NoBarrier_Store(&value, kVal2);
Relaxed_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
Release_Store(&value, kVal1);
@ -208,9 +208,9 @@ static void TestStoreAtomic8() {
Atomic8 value;
NoBarrier_Store(&value, kVal1);
Relaxed_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
NoBarrier_Store(&value, kVal2);
Relaxed_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
}
@ -225,9 +225,9 @@ static void TestLoad() {
AtomicType value;
value = kVal1;
CHECK_EQU(kVal1, NoBarrier_Load(&value));
CHECK_EQU(kVal1, Relaxed_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, NoBarrier_Load(&value));
CHECK_EQU(kVal2, Relaxed_Load(&value));
value = kVal1;
CHECK_EQU(kVal1, Acquire_Load(&value));
@ -245,9 +245,9 @@ static void TestLoadAtomic8() {
Atomic8 value;
value = kVal1;
CHECK_EQU(kVal1, NoBarrier_Load(&value));
CHECK_EQU(kVal1, Relaxed_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, NoBarrier_Load(&value));
CHECK_EQU(kVal2, Relaxed_Load(&value));
}