[base] Introduce MutexGuard as typedef for LockGuard<Mutex>

LockGuard is mostly used with Mutex. Since both are defined outside the
internal namespace, we often have to write {base::LockGuard<base::Mutex>}.
This CL shortens this to {base::MutexGuard} across the code base.

R=mlippautz@chromium.org

Bug: v8:8238
Cq-Include-Trybots: luci.chromium.try:linux_chromium_headless_rel;master.tryserver.blink:linux_trusty_blink_rel
Change-Id: I020d5933b73aafb98c4b72e3bb2dfd07c979ba73
Reviewed-on: https://chromium-review.googlesource.com/c/1278796
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56612}
Parent: 230dd86ce6
Commit: 75b5666175
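For orientation before the diff: a minimal, self-contained sketch of the pattern this CL introduces. It uses std::mutex as a stand-in for v8::base::Mutex and re-declares a simplified LockGuard, so the names and signatures below are illustrative only, not the actual V8 headers; the real change is the one-line alias added to src/base/platform/mutex.h shown in the diff.

// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the diff): std::mutex stands in for
// v8::base::Mutex, and LockGuard is re-declared here in simplified form.
// ---------------------------------------------------------------------------
#include <mutex>

namespace base {

using Mutex = std::mutex;

// Simplified scoped lock, mirroring the shape of v8::base::LockGuard.
template <typename M>
class LockGuard {
 public:
  explicit LockGuard(M* mutex) : mutex_(mutex) { mutex_->lock(); }
  ~LockGuard() { mutex_->unlock(); }
  LockGuard(const LockGuard&) = delete;
  LockGuard& operator=(const LockGuard&) = delete;

 private:
  M* mutex_;
};

// The alias this CL introduces: MutexGuard is just LockGuard<Mutex>.
using MutexGuard = LockGuard<Mutex>;

}  // namespace base

base::Mutex mutex_;

void Before() {
  base::LockGuard<base::Mutex> lock_guard(&mutex_);  // old spelling
}

void After() {
  base::MutexGuard lock_guard(&mutex_);  // new, shorter spelling
}

int main() {
  Before();
  After();
  return 0;
}
// ---------------------------------------------------------------------------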
@@ -933,14 +933,14 @@ void Simulator::TrashCallerSaveRegisters() {
int Simulator::ReadW(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(addr);
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}

int Simulator::ReadExW(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
&global_monitor_processor_);
@@ -951,7 +951,7 @@ int Simulator::ReadExW(int32_t addr) {
void Simulator::WriteW(int32_t addr, int value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyStore(addr);
global_monitor_.Pointer()->NotifyStore_Locked(addr,
&global_monitor_processor_);
@@ -960,7 +960,7 @@ void Simulator::WriteW(int32_t addr, int value) {
}

int Simulator::WriteExW(int32_t addr, int value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
@@ -975,7 +975,7 @@ int Simulator::WriteExW(int32_t addr, int value) {
uint16_t Simulator::ReadHU(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(addr);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
@@ -984,14 +984,14 @@ uint16_t Simulator::ReadHU(int32_t addr) {
int16_t Simulator::ReadH(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(addr);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}

uint16_t Simulator::ReadExHU(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
&global_monitor_processor_);
@@ -1002,7 +1002,7 @@ uint16_t Simulator::ReadExHU(int32_t addr) {
void Simulator::WriteH(int32_t addr, uint16_t value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyStore(addr);
global_monitor_.Pointer()->NotifyStore_Locked(addr,
&global_monitor_processor_);
@@ -1013,7 +1013,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value) {
void Simulator::WriteH(int32_t addr, int16_t value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyStore(addr);
global_monitor_.Pointer()->NotifyStore_Locked(addr,
&global_monitor_processor_);
@@ -1022,7 +1022,7 @@ void Simulator::WriteH(int32_t addr, int16_t value) {
}

int Simulator::WriteExH(int32_t addr, uint16_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
@@ -1035,21 +1035,21 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
}

uint8_t Simulator::ReadBU(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(addr);
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}

int8_t Simulator::ReadB(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(addr);
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
}

uint8_t Simulator::ReadExBU(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
&global_monitor_processor_);
@@ -1058,7 +1058,7 @@ uint8_t Simulator::ReadExBU(int32_t addr) {
}

void Simulator::WriteB(int32_t addr, uint8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyStore(addr);
global_monitor_.Pointer()->NotifyStore_Locked(addr,
&global_monitor_processor_);
@@ -1067,7 +1067,7 @@ void Simulator::WriteB(int32_t addr, uint8_t value) {
}

void Simulator::WriteB(int32_t addr, int8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyStore(addr);
global_monitor_.Pointer()->NotifyStore_Locked(addr,
&global_monitor_processor_);
@@ -1076,7 +1076,7 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
}

int Simulator::WriteExB(int32_t addr, uint8_t value) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
@@ -1091,14 +1091,14 @@ int Simulator::WriteExB(int32_t addr, uint8_t value) {
int32_t* Simulator::ReadDW(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(addr);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}

int32_t* Simulator::ReadExDW(int32_t addr) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord);
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
&global_monitor_processor_);
@@ -1109,7 +1109,7 @@ int32_t* Simulator::ReadExDW(int32_t addr) {
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyStore(addr);
global_monitor_.Pointer()->NotifyStore_Locked(addr,
&global_monitor_processor_);
@@ -1119,7 +1119,7 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
}

int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
@@ -6071,7 +6071,7 @@ void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
}

void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
- base::LockGuard<base::Mutex> lock_guard(&mutex);
+ base::MutexGuard lock_guard(&mutex);
if (!IsProcessorInLinkedList_Locked(processor)) {
return;
}
@@ -1731,7 +1731,7 @@ void Simulator::LoadStoreHelper(Instruction* instr,
uintptr_t stack = 0;

{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad();
} else {
@@ -1865,7 +1865,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
uintptr_t stack = 0;

{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad();
} else {
@@ -2016,7 +2016,7 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
unsigned rt = instr->Rt();

{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad();
}

@@ -2107,7 +2107,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
DCHECK_EQ(address % access_size, 0);
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (is_load != 0) {
if (is_exclusive) {
local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
@@ -4483,7 +4483,7 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
}

{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (log_read) {
local_monitor_.NotifyLoad();
} else {
@@ -4729,7 +4729,7 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
}

{
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
if (do_load) {
local_monitor_.NotifyLoad();
} else {
@@ -5863,7 +5863,7 @@ void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
}

void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
- base::LockGuard<base::Mutex> lock_guard(&mutex);
+ base::MutexGuard lock_guard(&mutex);
if (!IsProcessorInLinkedList_Locked(processor)) {
return;
}
@@ -113,7 +113,7 @@ void AssemblerBase::FlushICache(void* start, size_t size) {
if (size == 0) return;

#if defined(USE_SIMULATOR)
- base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
+ base::MutexGuard lock_guard(Simulator::i_cache_mutex());
Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
CpuFeatures::FlushICache(start, size);
@@ -28,7 +28,7 @@ size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }
void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
size_t alignment,
PageAllocator::Permission access) {
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
CHECK(IsAligned(alignment, region_allocator_.page_size()));

// Region allocator does not support alignments bigger than it's own
@@ -46,7 +46,7 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
}

bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);

Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
@@ -72,13 +72,13 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
{
// There must be an allocated region at given |address| of a size not
// smaller than |size|.
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
}
#endif

if (new_allocated_size < allocated_size) {
- LockGuard<Mutex> guard(&mutex_);
+ MutexGuard guard(&mutex_);
region_allocator_.TrimRegion(address, new_allocated_size);
}
@@ -41,7 +41,7 @@ ConditionVariable::~ConditionVariable() {
// Darwin kernel. http://crbug.com/517681.
{
Mutex lock;
- LockGuard<Mutex> l(&lock);
+ MutexGuard l(&lock);
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 1;
@@ -79,7 +79,6 @@ class V8_BASE_EXPORT ConditionVariable final {
DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

-
// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
// called).
// Usage:
@@ -87,7 +86,7 @@ class V8_BASE_EXPORT ConditionVariable final {
//   LAZY_CONDITION_VARIABLE_INITIALIZER;
//
// void my_function() {
- //   LockGuard<Mutex> lock_guard(&my_mutex);
+ //   MutexGuard lock_guard(&my_mutex);
//   my_condvar.Pointer()->Wait(&my_mutex);
// }
typedef LazyStaticInstance<
@@ -92,13 +92,12 @@ class V8_BASE_EXPORT Mutex final {
DISALLOW_COPY_AND_ASSIGN(Mutex);
};

-
// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
// Usage:
//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
//
// void my_function() {
- //   LockGuard<Mutex> guard(my_mutex.Pointer());
+ //   MutexGuard guard(my_mutex.Pointer());
//   // Do something.
// }
//
@@ -225,6 +224,8 @@ class LockGuard final {
DISALLOW_COPY_AND_ASSIGN(LockGuard);
};

+ using MutexGuard = LockGuard<Mutex>;
+
}  // namespace base
}  // namespace v8
@@ -213,7 +213,7 @@ size_t OS::CommitPageSize() {
// static
void OS::SetRandomMmapSeed(int64_t seed) {
if (seed) {
- LockGuard<Mutex> guard(rng_mutex.Pointer());
+ MutexGuard guard(rng_mutex.Pointer());
platform_random_number_generator.Pointer()->SetSeed(seed);
}
}
@@ -222,7 +222,7 @@ void OS::SetRandomMmapSeed(int64_t seed) {
void* OS::GetRandomMmapAddr() {
uintptr_t raw_addr;
{
- LockGuard<Mutex> guard(rng_mutex.Pointer());
+ MutexGuard guard(rng_mutex.Pointer());
platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
sizeof(raw_addr));
}
@@ -734,7 +734,7 @@ static void* ThreadEntry(void* arg) {
// We take the lock here to make sure that pthread_create finished first since
// we don't know which thread will run first (the original thread or the new
// one).
- { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
+ { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
DCHECK_NE(thread->data()->thread_, kNoThread);
thread->NotifyStartedAndRun();
@@ -769,7 +769,7 @@ void Thread::Start() {
DCHECK_EQ(0, result);
}
{
- LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+ MutexGuard lock_guard(&data_->thread_creation_mutex_);
result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
}
DCHECK_EQ(0, result);
@@ -723,7 +723,7 @@ size_t OS::CommitPageSize() {
// static
void OS::SetRandomMmapSeed(int64_t seed) {
if (seed) {
- LockGuard<Mutex> guard(rng_mutex.Pointer());
+ MutexGuard guard(rng_mutex.Pointer());
platform_random_number_generator.Pointer()->SetSeed(seed);
}
}
@@ -744,7 +744,7 @@ void* OS::GetRandomMmapAddr() {
#endif
uintptr_t address;
{
- LockGuard<Mutex> guard(rng_mutex.Pointer());
+ MutexGuard guard(rng_mutex.Pointer());
platform_random_number_generator.Pointer()->NextBytes(&address,
sizeof(address));
}
@@ -302,7 +302,7 @@ class Clock final {
// Time between resampling the un-granular clock for this API (1 minute).
const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

- LockGuard<Mutex> lock_guard(&mutex_);
+ MutexGuard lock_guard(&mutex_);

// Determine current time and ticks.
TimeTicks ticks = GetSystemTicks();
@@ -321,7 +321,7 @@ class Clock final {
}

Time NowFromSystemTime() {
- LockGuard<Mutex> lock_guard(&mutex_);
+ MutexGuard lock_guard(&mutex_);
initial_ticks_ = GetSystemTicks();
initial_time_ = GetSystemTime();
return initial_time_;
@@ -23,14 +23,15 @@ static RandomNumberGenerator::EntropySource entropy_source = nullptr;

// static
void RandomNumberGenerator::SetEntropySource(EntropySource source) {
- LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ MutexGuard lock_guard(entropy_mutex.Pointer());
entropy_source = source;
}


RandomNumberGenerator::RandomNumberGenerator() {
// Check if embedder supplied an entropy source.
- { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ {
+ MutexGuard lock_guard(entropy_mutex.Pointer());
if (entropy_source != nullptr) {
int64_t seed;
if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
@@ -66,7 +66,7 @@ void BasicBlockProfiler::Data::ResetCounts() {
}

BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
- base::LockGuard<base::Mutex> lock(&data_list_mutex_);
+ base::MutexGuard lock(&data_list_mutex_);
Data* data = new Data(n_blocks);
data_list_.push_back(data);
return data;
@@ -30,7 +30,7 @@ CancelableTaskManager::CancelableTaskManager()
: task_id_counter_(0), canceled_(false) {}

CancelableTaskManager::Id CancelableTaskManager::Register(Cancelable* task) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
CancelableTaskManager::Id id = ++task_id_counter_;
// Id overflows are not supported.
CHECK_NE(0, id);
@@ -40,7 +40,7 @@ CancelableTaskManager::Id CancelableTaskManager::Register(Cancelable* task) {
}

void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
size_t removed = cancelable_tasks_.erase(id);
USE(removed);
DCHECK_NE(0u, removed);
@@ -49,7 +49,7 @@ void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {

CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
CancelableTaskManager::Id id) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
auto entry = cancelable_tasks_.find(id);
if (entry != cancelable_tasks_.end()) {
Cancelable* value = entry->second;
@@ -70,7 +70,7 @@ void CancelableTaskManager::CancelAndWait() {
// the way if possible, i.e., if they have not started yet. After each round
// of canceling we wait for the background tasks that have already been
// started.
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
canceled_ = true;

// Cancelable tasks could be running or could potentially register new
@@ -94,7 +94,7 @@ void CancelableTaskManager::CancelAndWait() {
CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbortAll() {
// Clean up all cancelable fore- and background tasks. Tasks are canceled on
// the way if possible, i.e., if they have not started yet.
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);

if (cancelable_tasks_.empty()) return kTaskRemoved;
@@ -101,11 +101,11 @@ class CodeEventDispatcher {
CodeEventDispatcher() = default;

bool AddListener(CodeEventListener* listener) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
return listeners_.insert(listener).second;
}
void RemoveListener(CodeEventListener* listener) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
listeners_.erase(listener);
}
bool IsListeningToCodeEvents() {
@@ -117,8 +117,8 @@ class CodeEventDispatcher {
return false;
}

- #define CODE_EVENT_DISPATCH(code) \
-   base::LockGuard<base::Mutex> guard(&mutex_); \
+ #define CODE_EVENT_DISPATCH(code) \
+   base::MutexGuard guard(&mutex_); \
for (auto it = listeners_.begin(); it != listeners_.end(); ++it) (*it)->code

void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
@@ -14,7 +14,7 @@ namespace internal {
void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
const char* phase_name,
const BasicStats& stats) {
- base::LockGuard<base::Mutex> guard(&record_mutex_);
+ base::MutexGuard guard(&record_mutex_);

std::string phase_name_str(phase_name);
auto it = phase_map_.find(phase_name_str);
@@ -28,7 +28,7 @@ void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,

void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
const BasicStats& stats) {
- base::LockGuard<base::Mutex> guard(&record_mutex_);
+ base::MutexGuard guard(&record_mutex_);

std::string phase_kind_name_str(phase_kind_name);
auto it = phase_kind_map_.find(phase_kind_name_str);
@@ -43,7 +43,7 @@ void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,

void CompilationStatistics::RecordTotalStats(size_t source_size,
const BasicStats& stats) {
- base::LockGuard<base::Mutex> guard(&record_mutex_);
+ base::MutexGuard guard(&record_mutex_);

source_size += source_size;
total_stats_.Accumulate(stats);
@@ -78,7 +78,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
// Post a a background worker task to perform the compilation on the worker
// thread.
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
pending_background_jobs_.insert(it->second.get());
}
ScheduleMoreWorkerTasksIfNeeded();
@@ -117,7 +117,7 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
shared_to_unoptimized_job_id_.Set(function_handle, job_id);

{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
job->function = function_handle;
if (job->IsReadyToFinalize(lock)) {
// Schedule an idle task to finalize job if it is ready.
@@ -132,7 +132,7 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
RuntimeCallTimerScope runtimeTimer(
isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);

- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
pending_background_jobs_.erase(job);
return;
@@ -188,7 +188,7 @@ void CompilerDispatcher::AbortAll() {
jobs_.clear();
shared_to_unoptimized_job_id_.Clear();
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
DCHECK(pending_background_jobs_.empty());
DCHECK(running_background_jobs_.empty());
}
@@ -207,7 +207,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
}

void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
- const base::LockGuard<base::Mutex>&) {
+ const base::MutexGuard&) {
if (!taskrunner_->IdleTasksEnabled()) return;
if (idle_task_scheduled_) return;

@@ -221,7 +221,7 @@ void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (pending_background_jobs_.empty()) return;
if (platform_->NumberOfWorkerThreads() <= num_worker_tasks_) {
return;
@@ -238,7 +238,7 @@ void CompilerDispatcher::DoBackgroundWork() {
for (;;) {
Job* job = nullptr;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
if (!pending_background_jobs_.empty()) {
auto it = pending_background_jobs_.begin();
job = *it;
@@ -260,7 +260,7 @@ void CompilerDispatcher::DoBackgroundWork() {
job->task->Run();

{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
running_background_jobs_.erase(job);

job->has_run = true;
@@ -278,7 +278,7 @@ void CompilerDispatcher::DoBackgroundWork() {
}

{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
--num_worker_tasks_;
}
// Don't touch |this| anymore after this point, as it might have been
@@ -289,7 +289,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherDoIdleWork");
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
idle_task_scheduled_ = false;
}

@@ -302,7 +302,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
// Find a job which is pending finalization and has a shared function info
CompilerDispatcher::JobMap::const_iterator it;
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
for (it = jobs_.cbegin(); it != jobs_.cend(); ++it) {
if (it->second->IsReadyToFinalize(lock)) break;
}
@@ -326,7 +326,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {

// We didn't return above so there still might be jobs to finalize.
{
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
ScheduleIdleTaskFromAnyThread(lock);
}
}
@@ -116,12 +116,12 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
explicit Job(BackgroundCompileTask* task_arg);
~Job();

- bool IsReadyToFinalize(const base::LockGuard<base::Mutex>&) {
+ bool IsReadyToFinalize(const base::MutexGuard&) {
return has_run && !function.is_null();
}

bool IsReadyToFinalize(base::Mutex* mutex) {
- base::LockGuard<base::Mutex> lock(mutex);
+ base::MutexGuard lock(mutex);
return IsReadyToFinalize(lock);
}

@@ -136,7 +136,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
void WaitForJobIfRunningOnBackground(Job* job);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ScheduleMoreWorkerTasksIfNeeded();
- void ScheduleIdleTaskFromAnyThread(const base::LockGuard<base::Mutex>&);
+ void ScheduleIdleTaskFromAnyThread(const base::MutexGuard&);
void DoBackgroundWork();
void DoIdleWork(double deadline_in_seconds);
// Returns iterator to the inserted job.
@@ -46,7 +46,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
dispatcher_(dispatcher) {
- base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+ base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
++dispatcher_->ref_count_;
}

@@ -78,7 +78,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
dispatcher_->CompileNext(dispatcher_->NextInput(true));
}
{
- base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+ base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
if (--dispatcher_->ref_count_ == 0) {
dispatcher_->ref_count_zero_.NotifyOne();
}
@@ -95,7 +95,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
{
- base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ base::MutexGuard lock_guard(&ref_count_mutex_);
DCHECK_EQ(0, ref_count_);
}
#endif
@@ -105,7 +105,7 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {

OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
bool check_if_flushing) {
- base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
+ base::MutexGuard access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
@@ -131,7 +131,7 @@ void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
output_queue_.push(job);
isolate_->stack_guard()->RequestInstallCode();
}
@@ -140,7 +140,7 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
for (;;) {
OptimizedCompilationJob* job = nullptr;
{
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
job = output_queue_.front();
output_queue_.pop();
@@ -153,7 +153,7 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
if (blocking_behavior == BlockingBehavior::kDontBlock) {
if (FLAG_block_concurrent_recompilation) Unblock();
- base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
+ base::MutexGuard access_input_queue_(&input_queue_mutex_);
while (input_queue_length_ > 0) {
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
@@ -170,7 +170,7 @@ void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
{
- base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ base::MutexGuard lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
}
@@ -184,7 +184,7 @@ void OptimizingCompileDispatcher::Stop() {
base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
{
- base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ base::MutexGuard lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
}
@@ -205,7 +205,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
for (;;) {
OptimizedCompilationJob* job = nullptr;
{
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ base::MutexGuard access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
job = output_queue_.front();
output_queue_.pop();
@@ -230,7 +230,7 @@ void OptimizingCompileDispatcher::QueueForOptimization(
DCHECK(IsQueueAvailable());
{
// Add job to the back of the input queue.
- base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
+ base::MutexGuard access_input_queue(&input_queue_mutex_);
DCHECK_LT(input_queue_length_, input_queue_capacity_);
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
@@ -45,7 +45,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void InstallOptimizedFunctions();

inline bool IsQueueAvailable() {
- base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
+ base::MutexGuard access_input_queue(&input_queue_mutex_);
return input_queue_length_ < input_queue_capacity_;
}
@@ -34,35 +34,35 @@ StatsCounterThreadSafe::StatsCounterThreadSafe(Counters* counters,

void StatsCounterThreadSafe::Set(int Value) {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
SetLoc(ptr_, Value);
}
}

void StatsCounterThreadSafe::Increment() {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
IncrementLoc(ptr_);
}
}

void StatsCounterThreadSafe::Increment(int value) {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
IncrementLoc(ptr_, value);
}
}

void StatsCounterThreadSafe::Decrement() {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
DecrementLoc(ptr_);
}
}

void StatsCounterThreadSafe::Decrement(int value) {
if (ptr_) {
- base::LockGuard<base::Mutex> Guard(&mutex_);
+ base::MutexGuard Guard(&mutex_);
DecrementLoc(ptr_, value);
}
}
@@ -543,14 +543,14 @@ RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
base::make_unique<RuntimeCallStats>();
RuntimeCallStats* result = new_table.get();

- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
tables_.push_back(std::move(new_table));
return result;
}

void WorkerThreadRuntimeCallStats::AddToMainTable(
RuntimeCallStats* main_call_stats) {
- base::LockGuard<base::Mutex> lock(&mutex_);
+ base::MutexGuard lock(&mutex_);
for (auto& worker_stats : tables_) {
DCHECK_NE(main_call_stats, worker_stats.get());
main_call_stats->Add(worker_stats.get());
@@ -132,7 +132,7 @@ class DelayedTasksPlatform : public Platform {
std::shared_ptr<TaskRunner> runner =
platform_->GetForegroundTaskRunner(isolate);

- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
// Check if we can re-materialize the weak ptr in our map.
std::weak_ptr<DelayedTaskRunner>& weak_delayed_runner =
delayed_task_runners_[runner.get()];
@@ -230,7 +230,7 @@ class DelayedTasksPlatform : public Platform {
public:
void operator()(DelayedTaskRunner* runner) const {
TaskRunner* original_runner = runner->task_runner_.get();
- base::LockGuard<base::Mutex> lock_guard(&runner->platform_->mutex_);
+ base::MutexGuard lock_guard(&runner->platform_->mutex_);
auto& delayed_task_runners = runner->platform_->delayed_task_runners_;
DCHECK_EQ(1, delayed_task_runners.count(original_runner));
delayed_task_runners.erase(original_runner);
@@ -276,7 +276,7 @@ class DelayedTasksPlatform : public Platform {
delayed_task_runners_;

int32_t GetRandomDelayInMilliseconds() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
double delay_fraction = rng_.NextDouble();
// Sleep up to 100ms (100000us). Square {delay_fraction} to shift
// distribution towards shorter sleeps.
src/d8.cc (26 changed lines)
@@ -427,7 +427,7 @@ class BackgroundCompileThread : public base::Thread {

ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,
Local<Value> source) {
- base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ base::MutexGuard lock_guard(cached_code_mutex_.Pointer());
CHECK(source->IsString());
v8::String::Utf8Value key(isolate, source);
DCHECK(*key);
@@ -445,7 +445,7 @@ ScriptCompiler::CachedData* Shell::LookupCodeCache(Isolate* isolate,

void Shell::StoreInCodeCache(Isolate* isolate, Local<Value> source,
const ScriptCompiler::CachedData* cache_data) {
- base::LockGuard<base::Mutex> lock_guard(cached_code_mutex_.Pointer());
+ base::MutexGuard lock_guard(cached_code_mutex_.Pointer());
CHECK(source->IsString());
if (cache_data == nullptr) return;
v8::String::Utf8Value key(isolate, source);
@@ -1398,7 +1398,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
}

{
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
if (workers_.size() >= kMaxWorkers) {
Throw(args.GetIsolate(), "Too many workers, I won't let you create more");
return;
@@ -1931,7 +1931,7 @@ void Shell::Initialize(Isolate* isolate) {

Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
// This needs to be a critical section since this is not thread-safe
- base::LockGuard<base::Mutex> lock_guard(context_mutex_.Pointer());
+ base::MutexGuard lock_guard(context_mutex_.Pointer());
// Initialize the global objects
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
@@ -2561,14 +2561,14 @@ ExternalizedContents::~ExternalizedContents() {
}

void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
data_.push_back(std::move(data));
}

bool SerializationDataQueue::Dequeue(
std::unique_ptr<SerializationData>* out_data) {
out_data->reset();
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
if (data_.empty()) return false;
*out_data = std::move(data_[0]);
data_.erase(data_.begin());
@@ -2577,13 +2577,13 @@ bool SerializationDataQueue::Dequeue(


bool SerializationDataQueue::IsEmpty() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
return data_.empty();
}


void SerializationDataQueue::Clear() {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ base::MutexGuard lock_guard(&mutex_);
data_.clear();
}

@@ -2982,7 +2982,7 @@ void Shell::CollectGarbage(Isolate* isolate) {
}

void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
- base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ base::MutexGuard guard(isolate_status_lock_.Pointer());
if (isolate_status_.count(isolate) == 0) {
isolate_status_.insert(std::make_pair(isolate, value));
} else {
@@ -3027,7 +3027,7 @@ bool ProcessMessages(

void Shell::CompleteMessageLoop(Isolate* isolate) {
auto get_waiting_behaviour = [isolate]() {
- base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ base::MutexGuard guard(isolate_status_lock_.Pointer());
DCHECK_GT(isolate_status_.count(isolate), 0);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::wasm::WasmEngine* wasm_engine = i_isolate->wasm_engine();
@@ -3280,7 +3280,7 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
data = serializer.Release();
}
// Append externalized contents even when WriteValue fails.
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
serializer.AppendExternalizedContentsTo(&externalized_contents_);
return data;
}
@@ -3300,7 +3300,7 @@ void Shell::CleanupWorkers() {
// create a new Worker, it would deadlock.
std::vector<Worker*> workers_copy;
{
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
allow_new_workers_ = false;
workers_copy.swap(workers_);
}
@@ -3311,7 +3311,7 @@ void Shell::CleanupWorkers() {
}

// Now that all workers are terminated, we can re-enable Worker creation.
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ base::MutexGuard lock_guard(workers_mutex_.Pointer());
allow_new_workers_ = true;
externalized_contents_.clear();
}
@@ -49,7 +49,7 @@ class Debug::TemporaryObjectsTracker : public HeapObjectAllocationTracker {

void MoveEvent(Address from, Address to, int) override {
if (from == to) return;
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
auto it = objects_.find(from);
if (it == objects_.end()) {
// If temporary object was collected we can get MoveEvent which moves
@@ -33,7 +33,7 @@ void FutexWaitListNode::NotifyWake() {
// interrupts, or if FutexEmulation::Wait was just called and the mutex
// hasn't been locked yet. In either of those cases, we set the interrupted
// flag to true, which will be tested after the mutex is re-locked.
- base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
+ base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
if (waiting_) {
cond_.NotifyOne();
interrupted_ = true;
@@ -80,7 +80,7 @@ void AtomicsWaitWakeHandle::Wake() {
// The split lock by itself isn’t an issue, as long as the caller properly
// synchronizes this with the closing `AtomicsWaitCallback`.
{
- base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
+ base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
stopped_ = true;
}
isolate_->futex_wait_list_node()->NotifyWake();
@@ -133,7 +133,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;

do { // Not really a loop, just makes it easier to break out early.
- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ base::MutexGuard lock_guard(mutex_.Pointer());
// Reset node->waiting_ = false when leaving this scope (but while
// still holding the lock).
ResetWaitingOnScopeExit reset_waiting(node);
@@ -243,7 +243,7 @@ Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
int waiters_woken = 0;
void* backing_store = array_buffer->backing_store();

- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ base::MutexGuard lock_guard(mutex_.Pointer());
FutexWaitListNode* node = wait_list_.Pointer()->head_;
while (node && num_waiters_to_wake > 0) {
if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
@@ -266,7 +266,7 @@ Object* FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
DCHECK_LT(addr, array_buffer->byte_length());
void* backing_store = array_buffer->backing_store();

- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ base::MutexGuard lock_guard(mutex_.Pointer());

int waiters = 0;
FutexWaitListNode* node = wait_list_.Pointer()->head_;
@@ -2124,7 +2124,7 @@ static void AddCode(const char* name, Code* code, SharedFunctionInfo* shared,
void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;
- base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
+ base::MutexGuard lock_guard(mutex.Pointer());
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
Address addr = reinterpret_cast<Address>(event->code_start);
@@ -28,13 +28,13 @@ void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
if (heap_->ShouldReduceMemory()) {
FreeAllocationsHelper(heap_, allocations);
} else {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ base::MutexGuard guard(&allocations_mutex_);
allocations_.push_back(std::move(allocations));
}
}

void ArrayBufferCollector::PerformFreeAllocations() {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ base::MutexGuard guard(&allocations_mutex_);
for (const std::vector<JSArrayBuffer::Allocation>& allocations :
allocations_) {
FreeAllocationsHelper(heap_, allocations);
@@ -21,7 +21,7 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
const size_t length = buffer->byte_length();
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) {
page->AllocateLocalTracker();
@@ -44,7 +44,7 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
const size_t length = buffer->byte_length();
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
tracker->Remove(buffer, length);
@@ -37,7 +37,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
DCHECK_NOT_NULL(new_buffer);
Page* target_page = Page::FromAddress(new_buffer->address());
{
- base::LockGuard<base::Mutex> guard(target_page->mutex());
+ base::MutexGuard guard(target_page->mutex());
LocalArrayBufferTracker* tracker = target_page->local_tracker();
if (tracker == nullptr) {
target_page->AllocateLocalTracker();
@@ -120,7 +120,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return false;
return tracker->IsTracked(buffer);
@@ -34,17 +34,17 @@ class OneshotBarrier {
OneshotBarrier() : tasks_(0), waiting_(0), done_(false) {}

void Start() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
tasks_++;
}

void NotifyAll() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (waiting_ > 0) condition_.NotifyAll();
}

bool Wait() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (done_) return true;

DCHECK_LE(waiting_, tasks_);
@@ -660,7 +660,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}

{
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
is_pending_[task_id] = false;
--pending_task_count_;
pending_condition_.NotifyAll();
@@ -676,7 +676,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
void ConcurrentMarking::ScheduleTasks() {
DCHECK(!heap_->IsTearingDown());
if (!FLAG_concurrent_marking) return;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
static const int num_cores =
@@ -715,7 +715,7 @@ void ConcurrentMarking::ScheduleTasks() {
void ConcurrentMarking::RescheduleTasksIfNeeded() {
if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
{
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
if (pending_task_count_ > 0) return;
}
if (!shared_->IsGlobalPoolEmpty() ||
@@ -727,7 +727,7 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {

bool ConcurrentMarking::Stop(StopRequest stop_request) {
if (!FLAG_concurrent_marking) return false;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);

if (pending_task_count_ == 0) return false;

@@ -758,7 +758,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
bool ConcurrentMarking::IsStopped() {
if (!FLAG_concurrent_marking) return true;

- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
return pending_task_count_ == 0;
}
@@ -196,7 +196,7 @@ void GCTracer::ResetForTesting() {
average_mark_compact_duration_ = 0;
current_mark_compact_mutator_utilization_ = 1.0;
previous_mark_compact_end_time_ = 0;
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
background_counter_[i].runtime_call_counter.Reset();
@@ -1060,7 +1060,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
int last_background_scope) {
DCHECK_EQ(last_global_scope - first_global_scope,
last_background_scope - first_background_scope);
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
int background_mc_scopes = last_background_scope - first_background_scope + 1;
for (int i = 0; i < background_mc_scopes; i++) {
current_.scopes[first_global_scope + i] +=
@@ -1085,7 +1085,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
void GCTracer::AddBackgroundScopeSample(
BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter) {
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
BackgroundCounter& counter = background_counter_[scope];
counter.total_duration_ms += duration;
if (runtime_call_counter) {
@@ -1858,7 +1858,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {

void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
- base::LockGuard<base::Mutex> guard(relocation_mutex());
+ base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
@@ -1901,7 +1901,7 @@ void Heap::EvacuateYoungGeneration() {

void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
- base::LockGuard<base::Mutex> guard(relocation_mutex());
+ base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
@@ -1954,7 +1954,7 @@ void Heap::ComputeFastPromotionMode() {

void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
if (unprotected_memory_chunks_registry_enabled_) {
- base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
if (unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetReadAndWritable();
}
@@ -3222,7 +3222,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
void Heap::EagerlyFreeExternalMemory() {
for (Page* page : *old_space()) {
if (!page->SweepingDone()) {
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
if (!page->SweepingDone()) {
ArrayBufferTracker::FreeDead(
page, mark_compact_collector()->non_atomic_marking_state());
@ -2776,7 +2776,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,

void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
base::MutexGuard guard(heap()->relocation_mutex());

{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);

@ -2941,7 +2941,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"RememberedSetUpdatingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
base::MutexGuard guard(chunk_->mutex());
CodePageMemoryModificationScope memory_modification_scope(chunk_);
UpdateUntypedPointers();
UpdateTypedPointers();

@ -3303,7 +3303,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {

void MarkCompactCollector::ReportAbortedEvacuationCandidate(
HeapObject* failed_object, Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);

aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
}

@ -4147,7 +4147,7 @@ class PageMarkingItem : public MarkingItem {
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
base::MutexGuard guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
}

@ -4325,7 +4325,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {

void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
base::MutexGuard guard(heap()->relocation_mutex());

{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
@ -272,7 +272,7 @@ void ObjectStats::Dump(std::stringstream& stream) {
|
||||
}
|
||||
|
||||
void ObjectStats::CheckpointObjectStats() {
|
||||
base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
|
||||
base::MutexGuard lock_guard(object_stats_mutex.Pointer());
|
||||
MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
|
||||
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
|
||||
ClearObjectStats();
|
||||
|
@ -226,7 +226,7 @@ class SlotSet : public Malloced {
|
||||
}
|
||||
|
||||
int NumberOfPreFreedEmptyBuckets() {
|
||||
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
|
||||
base::MutexGuard guard(&to_be_freed_buckets_mutex_);
|
||||
return static_cast<int>(to_be_freed_buckets_.size());
|
||||
}
|
||||
|
||||
@ -253,7 +253,7 @@ class SlotSet : public Malloced {
|
||||
}
|
||||
|
||||
void FreeToBeFreedBuckets() {
|
||||
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
|
||||
base::MutexGuard guard(&to_be_freed_buckets_mutex_);
|
||||
while (!to_be_freed_buckets_.empty()) {
|
||||
Bucket top = to_be_freed_buckets_.top();
|
||||
to_be_freed_buckets_.pop();
|
||||
@ -294,7 +294,7 @@ class SlotSet : public Malloced {
|
||||
void PreFreeEmptyBucket(int bucket_index) {
|
||||
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
|
||||
if (bucket != nullptr) {
|
||||
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
|
||||
base::MutexGuard guard(&to_be_freed_buckets_mutex_);
|
||||
to_be_freed_buckets_.push(bucket);
|
||||
StoreBucket(&buckets_[bucket_index], nullptr);
|
||||
}
|
||||
@ -532,7 +532,7 @@ class TypedSlotSet {
|
||||
} else {
|
||||
set_top(n);
|
||||
}
|
||||
base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
|
||||
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
|
||||
to_be_freed_chunks_.push(chunk);
|
||||
} else {
|
||||
previous = chunk;
|
||||
@ -543,7 +543,7 @@ class TypedSlotSet {
|
||||
}
|
||||
|
||||
void FreeToBeFreedChunks() {
|
||||
base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
|
||||
base::MutexGuard guard(&to_be_freed_chunks_mutex_);
|
||||
while (!to_be_freed_chunks_.empty()) {
|
||||
Chunk* top = to_be_freed_chunks_.top();
|
||||
to_be_freed_chunks_.pop();
|
||||
|
@ -509,7 +509,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
|
||||
|
||||
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
|
||||
int size_in_bytes, AllocationAlignment alignment) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
return AllocateRaw(size_in_bytes, alignment);
|
||||
}
|
||||
|
||||
|
@ -98,7 +98,7 @@ static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
|
||||
LAZY_INSTANCE_INITIALIZER;
|
||||
|
||||
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
auto it = recently_freed_.find(code_range_size);
|
||||
if (it == recently_freed_.end() || it->second.empty()) {
|
||||
return reinterpret_cast<Address>(GetRandomMmapAddr());
|
||||
@ -110,7 +110,7 @@ Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
|
||||
|
||||
void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
|
||||
size_t code_range_size) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
recently_freed_[code_range_size].push_back(code_range_start);
|
||||
}
|
||||
|
||||
@ -361,7 +361,7 @@ void MemoryAllocator::Unmapper::TearDown() {
|
||||
}
|
||||
|
||||
int MemoryAllocator::Unmapper::NumberOfChunks() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
size_t result = 0;
|
||||
for (int i = 0; i < kNumberOfChunkQueues; i++) {
|
||||
result += chunks_[i].size();
|
||||
@ -370,7 +370,7 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
|
||||
}
|
||||
|
||||
size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
|
||||
size_t sum = 0;
|
||||
// kPooled chunks are already uncommited. We only have to account for
|
||||
@ -468,7 +468,7 @@ void MemoryChunk::SetReadAndExecutable() {
|
||||
DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
|
||||
// Decrementing the write_unprotect_counter_ and changing the page
|
||||
// protection mode has to be atomic.
|
||||
base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
|
||||
base::MutexGuard guard(page_protection_change_mutex_);
|
||||
if (write_unprotect_counter_ == 0) {
|
||||
// This is a corner case that may happen when we have a
|
||||
// CodeSpaceMemoryModificationScope open and this page was newly
|
||||
@ -493,7 +493,7 @@ void MemoryChunk::SetReadAndWritable() {
|
||||
DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
|
||||
// Incrementing the write_unprotect_counter_ and changing the page
|
||||
// protection mode has to be atomic.
|
||||
base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
|
||||
base::MutexGuard guard(page_protection_change_mutex_);
|
||||
write_unprotect_counter_++;
|
||||
DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
|
||||
if (write_unprotect_counter_ == 1) {
|
||||
@ -1425,12 +1425,12 @@ void PagedSpace::RefillFreeList() {
|
||||
if (is_local()) {
|
||||
DCHECK_NE(this, p->owner());
|
||||
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
|
||||
base::LockGuard<base::Mutex> guard(owner->mutex());
|
||||
base::MutexGuard guard(owner->mutex());
|
||||
owner->RefineAllocatedBytesAfterSweeping(p);
|
||||
owner->RemovePage(p);
|
||||
added += AddPage(p);
|
||||
} else {
|
||||
base::LockGuard<base::Mutex> guard(mutex());
|
||||
base::MutexGuard guard(mutex());
|
||||
DCHECK_EQ(this, p->owner());
|
||||
RefineAllocatedBytesAfterSweeping(p);
|
||||
added += RelinkFreeListCategories(p);
|
||||
@ -1442,7 +1442,7 @@ void PagedSpace::RefillFreeList() {
|
||||
}
|
||||
|
||||
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
|
||||
base::LockGuard<base::Mutex> guard(mutex());
|
||||
base::MutexGuard guard(mutex());
|
||||
|
||||
DCHECK(identity() == other->identity());
|
||||
// Unmerged fields:
|
||||
@ -1505,7 +1505,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
|
||||
}
|
||||
|
||||
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
|
||||
base::LockGuard<base::Mutex> guard(mutex());
|
||||
base::MutexGuard guard(mutex());
|
||||
// Check for pages that still contain free list entries. Bail out for smaller
|
||||
// categories.
|
||||
const int minimum_category =
|
||||
@ -1581,7 +1581,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
|
||||
bool PagedSpace::Expand() {
|
||||
// Always lock against the main space as we can only adjust capacity and
|
||||
// pages concurrently for the main paged space.
|
||||
base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
|
||||
base::MutexGuard guard(heap()->paged_space(identity())->mutex());
|
||||
|
||||
const int size = AreaSize();
|
||||
|
||||
@ -2209,7 +2209,7 @@ bool NewSpace::AddFreshPage() {
|
||||
|
||||
|
||||
bool NewSpace::AddFreshPageSynchronized() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
return AddFreshPage();
|
||||
}
|
||||
|
||||
@ -3373,7 +3373,7 @@ Object* LargeObjectSpace::FindObject(Address a) {
|
||||
}
|
||||
|
||||
LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
|
||||
base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
|
||||
base::MutexGuard guard(&chunk_map_mutex_);
|
||||
return FindPage(a);
|
||||
}
|
||||
|
||||
@ -3409,7 +3409,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
|
||||
void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
|
||||
// There may be concurrent access on the chunk map. We have to take the lock
|
||||
// here.
|
||||
base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
|
||||
base::MutexGuard guard(&chunk_map_mutex_);
|
||||
for (Address current = reinterpret_cast<Address>(page);
|
||||
current < reinterpret_cast<Address>(page) + page->size();
|
||||
current += MemoryChunk::kPageSize) {
|
||||
|
@ -1223,13 +1223,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
|
||||
|
||||
template <ChunkQueueType type>
|
||||
void AddMemoryChunkSafe(MemoryChunk* chunk) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
chunks_[type].push_back(chunk);
|
||||
}
|
||||
|
||||
template <ChunkQueueType type>
|
||||
MemoryChunk* GetMemoryChunkSafe() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
if (chunks_[type].empty()) return nullptr;
|
||||
MemoryChunk* chunk = chunks_[type].back();
|
||||
chunks_[type].pop_back();
|
||||
|
@ -133,7 +133,7 @@ int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
|
||||
}
|
||||
|
||||
void StoreBuffer::FlipStoreBuffers() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
int other = (current_ + 1) % kStoreBuffers;
|
||||
MoveEntriesToRememberedSet(other);
|
||||
lazy_top_[current_] = top_;
|
||||
@ -155,7 +155,7 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
|
||||
|
||||
// We are taking the chunk map mutex here because the page lookup of addr
|
||||
// below may require us to check if addr is part of a large page.
|
||||
base::LockGuard<base::Mutex> guard(heap_->lo_space()->chunk_map_mutex());
|
||||
base::MutexGuard guard(heap_->lo_space()->chunk_map_mutex());
|
||||
for (Address* current = start_[index]; current < lazy_top_[index];
|
||||
current++) {
|
||||
Address addr = *current;
|
||||
@ -184,7 +184,7 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
|
||||
}
|
||||
|
||||
void StoreBuffer::MoveAllEntriesToRememberedSet() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
int other = (current_ + 1) % kStoreBuffers;
|
||||
MoveEntriesToRememberedSet(other);
|
||||
lazy_top_[current_] = top_;
|
||||
@ -193,7 +193,7 @@ void StoreBuffer::MoveAllEntriesToRememberedSet() {
|
||||
}
|
||||
|
||||
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
int other = (current_ + 1) % kStoreBuffers;
|
||||
MoveEntriesToRememberedSet(other);
|
||||
task_running_ = false;
|
||||
|
@ -181,7 +181,7 @@ void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
|
||||
}
|
||||
|
||||
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
|
||||
if (!list.empty()) {
|
||||
auto last_page = list.back();
|
||||
@ -410,7 +410,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
|
||||
DCHECK(IsValidSweepingSpace(identity));
|
||||
int max_freed = 0;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(page->mutex());
|
||||
base::MutexGuard guard(page->mutex());
|
||||
// If this page was already swept in the meantime, we can return here.
|
||||
if (page->SweepingDone()) return 0;
|
||||
|
||||
@ -437,7 +437,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
|
||||
}
|
||||
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
|
||||
}
|
||||
return max_freed;
|
||||
@ -456,7 +456,7 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
|
||||
|
||||
void Sweeper::AddPage(AllocationSpace space, Page* page,
|
||||
Sweeper::AddPageMode mode) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
DCHECK(IsValidSweepingSpace(space));
|
||||
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
|
||||
if (mode == Sweeper::REGULAR) {
|
||||
@ -482,7 +482,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
|
||||
}
|
||||
|
||||
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
DCHECK(IsValidSweepingSpace(space));
|
||||
int space_index = GetSweepSpaceIndex(space);
|
||||
Page* page = nullptr;
|
||||
|
@ -282,13 +282,13 @@ class Worklist {
|
||||
}
|
||||
|
||||
V8_INLINE void Push(Segment* segment) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
segment->set_next(top_);
|
||||
set_top(segment);
|
||||
}
|
||||
|
||||
V8_INLINE bool Pop(Segment** segment) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (top_ != nullptr) {
|
||||
*segment = top_;
|
||||
set_top(top_->next());
|
||||
@ -302,7 +302,7 @@ class Worklist {
|
||||
}
|
||||
|
||||
void Clear() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
Segment* current = top_;
|
||||
while (current != nullptr) {
|
||||
Segment* tmp = current;
|
||||
@ -315,7 +315,7 @@ class Worklist {
|
||||
// See Worklist::Update.
|
||||
template <typename Callback>
|
||||
void Update(Callback callback) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
Segment* prev = nullptr;
|
||||
Segment* current = top_;
|
||||
while (current != nullptr) {
|
||||
@ -339,7 +339,7 @@ class Worklist {
|
||||
// See Worklist::Iterate.
|
||||
template <typename Callback>
|
||||
void Iterate(Callback callback) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
for (Segment* current = top_; current != nullptr;
|
||||
current = current->next()) {
|
||||
current->Iterate(callback);
|
||||
@ -349,7 +349,7 @@ class Worklist {
|
||||
std::pair<Segment*, Segment*> Extract() {
|
||||
Segment* top = nullptr;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
|
||||
top = top_;
|
||||
set_top(nullptr);
|
||||
@ -362,7 +362,7 @@ class Worklist {
|
||||
void MergeList(Segment* start, Segment* end) {
|
||||
if (start == nullptr) return;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
end->set_next(top_);
|
||||
set_top(start);
|
||||
}
|
||||
|
@ -404,7 +404,7 @@ struct V8InspectorImpl::EvaluateScope::CancelToken {
|
||||
|
||||
V8InspectorImpl::EvaluateScope::~EvaluateScope() {
|
||||
if (m_cancelToken) {
|
||||
v8::base::LockGuard<v8::base::Mutex> lock(&m_cancelToken->m_mutex);
|
||||
v8::base::MutexGuard lock(&m_cancelToken->m_mutex);
|
||||
m_cancelToken->m_canceled = true;
|
||||
m_isolate->CancelTerminateExecution();
|
||||
}
|
||||
@ -418,7 +418,7 @@ class V8InspectorImpl::EvaluateScope::TerminateTask : public v8::Task {
|
||||
void Run() override {
|
||||
// CancelToken contains m_canceled bool which may be changed from main
|
||||
// thread, so lock mutex first.
|
||||
v8::base::LockGuard<v8::base::Mutex> lock(&m_token->m_mutex);
|
||||
v8::base::MutexGuard lock(&m_token->m_mutex);
|
||||
if (m_token->m_canceled) return;
|
||||
m_isolate->TerminateExecution();
|
||||
}
|
||||
|
@ -183,7 +183,7 @@ Isolate::PerIsolateThreadData*
|
||||
ThreadId thread_id = ThreadId::Current();
|
||||
PerIsolateThreadData* per_thread = nullptr;
|
||||
{
|
||||
base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
|
||||
base::MutexGuard lock_guard(&thread_data_table_mutex_);
|
||||
per_thread = thread_data_table_.Lookup(thread_id);
|
||||
if (per_thread == nullptr) {
|
||||
per_thread = new PerIsolateThreadData(this, thread_id);
|
||||
@ -200,7 +200,7 @@ void Isolate::DiscardPerThreadDataForThisThread() {
|
||||
if (thread_id_int) {
|
||||
ThreadId thread_id = ThreadId(thread_id_int);
|
||||
DCHECK(!thread_manager_->mutex_owner_.Equals(thread_id));
|
||||
base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
|
||||
base::MutexGuard lock_guard(&thread_data_table_mutex_);
|
||||
PerIsolateThreadData* per_thread = thread_data_table_.Lookup(thread_id);
|
||||
if (per_thread) {
|
||||
DCHECK(!per_thread->thread_state_);
|
||||
@ -220,7 +220,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
|
||||
ThreadId thread_id) {
|
||||
PerIsolateThreadData* per_thread = nullptr;
|
||||
{
|
||||
base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
|
||||
base::MutexGuard lock_guard(&thread_data_table_mutex_);
|
||||
per_thread = thread_data_table_.Lookup(thread_id);
|
||||
}
|
||||
return per_thread;
|
||||
@ -2417,7 +2417,7 @@ char* Isolate::RestoreThread(char* from) {
|
||||
}
|
||||
|
||||
void Isolate::ReleaseSharedPtrs() {
|
||||
base::LockGuard<base::Mutex> lock(&managed_ptr_destructors_mutex_);
|
||||
base::MutexGuard lock(&managed_ptr_destructors_mutex_);
|
||||
while (managed_ptr_destructors_head_) {
|
||||
ManagedPtrDestructor* l = managed_ptr_destructors_head_;
|
||||
ManagedPtrDestructor* n = nullptr;
|
||||
@ -2431,7 +2431,7 @@ void Isolate::ReleaseSharedPtrs() {
|
||||
}
|
||||
|
||||
void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
|
||||
base::LockGuard<base::Mutex> lock(&managed_ptr_destructors_mutex_);
|
||||
base::MutexGuard lock(&managed_ptr_destructors_mutex_);
|
||||
DCHECK_NULL(destructor->prev_);
|
||||
DCHECK_NULL(destructor->next_);
|
||||
if (managed_ptr_destructors_head_) {
|
||||
@ -2442,7 +2442,7 @@ void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
|
||||
}
|
||||
|
||||
void Isolate::UnregisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
|
||||
base::LockGuard<base::Mutex> lock(&managed_ptr_destructors_mutex_);
|
||||
base::MutexGuard lock(&managed_ptr_destructors_mutex_);
|
||||
if (destructor->prev_) {
|
||||
destructor->prev_->next_ = destructor->next_;
|
||||
} else {
|
||||
@ -2731,7 +2731,7 @@ void Isolate::TearDown() {
|
||||
Deinit();
|
||||
|
||||
{
|
||||
base::LockGuard<base::Mutex> lock_guard(&thread_data_table_mutex_);
|
||||
base::MutexGuard lock_guard(&thread_data_table_mutex_);
|
||||
thread_data_table_.RemoveAllThreads();
|
||||
}
|
||||
|
||||
@ -4289,14 +4289,14 @@ void Isolate::CheckDetachedContextsAfterGC() {
|
||||
}
|
||||
|
||||
double Isolate::LoadStartTimeMs() {
|
||||
base::LockGuard<base::Mutex> guard(&rail_mutex_);
|
||||
base::MutexGuard guard(&rail_mutex_);
|
||||
return load_start_time_ms_;
|
||||
}
|
||||
|
||||
void Isolate::SetRAILMode(RAILMode rail_mode) {
|
||||
RAILMode old_rail_mode = rail_mode_.Value();
|
||||
if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
|
||||
base::LockGuard<base::Mutex> guard(&rail_mutex_);
|
||||
base::MutexGuard guard(&rail_mutex_);
|
||||
load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
|
||||
}
|
||||
rail_mode_.SetValue(rail_mode);
|
||||
|
@ -15,7 +15,7 @@ DefaultForegroundTaskRunner::DefaultForegroundTaskRunner(
|
||||
: idle_task_support_(idle_task_support), time_function_(time_function) {}
|
||||
|
||||
void DefaultForegroundTaskRunner::Terminate() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
terminated_ = true;
|
||||
|
||||
// Drain the task queues.
|
||||
@ -24,15 +24,15 @@ void DefaultForegroundTaskRunner::Terminate() {
while (!idle_task_queue_.empty()) idle_task_queue_.pop();
}

void DefaultForegroundTaskRunner::PostTaskLocked(
std::unique_ptr<Task> task, const base::LockGuard<base::Mutex>&) {
void DefaultForegroundTaskRunner::PostTaskLocked(std::unique_ptr<Task> task,
const base::MutexGuard&) {
if (terminated_) return;
task_queue_.push(std::move(task));
event_loop_control_.NotifyOne();
}

void DefaultForegroundTaskRunner::PostTask(std::unique_ptr<Task> task) {
base::LockGuard<base::Mutex> guard(&lock_);
base::MutexGuard guard(&lock_);
PostTaskLocked(std::move(task), guard);
}
@ -43,7 +43,7 @@ double DefaultForegroundTaskRunner::MonotonicallyIncreasingTime() {
|
||||
void DefaultForegroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
|
||||
double delay_in_seconds) {
|
||||
DCHECK_GE(delay_in_seconds, 0.0);
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (terminated_) return;
|
||||
double deadline = MonotonicallyIncreasingTime() + delay_in_seconds;
|
||||
delayed_task_queue_.push(std::make_pair(deadline, std::move(task)));
|
||||
@ -51,7 +51,7 @@ void DefaultForegroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
|
||||
|
||||
void DefaultForegroundTaskRunner::PostIdleTask(std::unique_ptr<IdleTask> task) {
|
||||
CHECK_EQ(IdleTaskSupport::kEnabled, idle_task_support_);
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (terminated_) return;
|
||||
idle_task_queue_.push(std::move(task));
|
||||
}
|
||||
@ -62,7 +62,7 @@ bool DefaultForegroundTaskRunner::IdleTasksEnabled() {
|
||||
|
||||
std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue(
|
||||
MessageLoopBehavior wait_for_work) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
// Move delayed tasks that hit their deadline to the main queue.
|
||||
std::unique_ptr<Task> task = PopTaskFromDelayedQueueLocked(guard);
|
||||
while (task) {
|
||||
@ -83,7 +83,7 @@ std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue(

std::unique_ptr<Task>
DefaultForegroundTaskRunner::PopTaskFromDelayedQueueLocked(
const base::LockGuard<base::Mutex>&) {
const base::MutexGuard&) {
if (delayed_task_queue_.empty()) return {};

double now = MonotonicallyIncreasingTime();
@ -102,7 +102,7 @@ DefaultForegroundTaskRunner::PopTaskFromDelayedQueueLocked(
|
||||
}
|
||||
|
||||
std::unique_ptr<IdleTask> DefaultForegroundTaskRunner::PopTaskFromIdleQueue() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (idle_task_queue_.empty()) return {};
|
||||
|
||||
std::unique_ptr<IdleTask> task = std::move(idle_task_queue_.front());
|
||||
@ -111,8 +111,7 @@ std::unique_ptr<IdleTask> DefaultForegroundTaskRunner::PopTaskFromIdleQueue() {
return task;
}

void DefaultForegroundTaskRunner::WaitForTaskLocked(
const base::LockGuard<base::Mutex>&) {
void DefaultForegroundTaskRunner::WaitForTaskLocked(const base::MutexGuard&) {
event_loop_control_.Wait(&lock_);
}
@ -29,7 +29,7 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner

std::unique_ptr<IdleTask> PopTaskFromIdleQueue();

void WaitForTaskLocked(const base::LockGuard<base::Mutex>&);
void WaitForTaskLocked(const base::MutexGuard&);

double MonotonicallyIncreasingTime();

@ -46,13 +46,11 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
private:
// The same as PostTask, but the lock is already held by the caller. The
// {guard} parameter should make sure that the caller is holding the lock.
void PostTaskLocked(std::unique_ptr<Task> task,
const base::LockGuard<base::Mutex>&);
void PostTaskLocked(std::unique_ptr<Task> task, const base::MutexGuard&);

// A caller of this function has to hold {lock_}. The {guard} parameter should
// make sure that the caller is holding the lock.
std::unique_ptr<Task> PopTaskFromDelayedQueueLocked(
const base::LockGuard<base::Mutex>&);
std::unique_ptr<Task> PopTaskFromDelayedQueueLocked(const base::MutexGuard&);

bool terminated_ = false;
base::Mutex lock_;
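As the comments above state, the {guard} parameters of the *Locked helpers exist only so callers must prove they already hold {lock_}. A minimal sketch of that idiom, with a hypothetical class and member names not taken from this change:

// Hypothetical sketch of the guard-as-proof-of-lock idiom: the otherwise unused
// const base::MutexGuard& parameter forces callers of the *Locked helper to have
// constructed a guard, i.e. to be holding the mutex, before calling it.
class Counter {
 public:
  void Increment() {
    base::MutexGuard guard(&lock_);
    IncrementLocked(guard);  // passes the guard as evidence the lock is held
  }

 private:
  void IncrementLocked(const base::MutexGuard&) { ++value_; }

  base::Mutex lock_;
  int value_ = 0;
};

Note that this convention only documents intent; nothing checks that the guard handed in actually locks this particular mutex.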
@ -92,7 +92,7 @@ DefaultPlatform::DefaultPlatform(
|
||||
}
|
||||
|
||||
DefaultPlatform::~DefaultPlatform() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (worker_threads_task_runner_) worker_threads_task_runner_->Terminate();
|
||||
for (auto it : foreground_task_runner_map_) {
|
||||
it.second->Terminate();
|
||||
@ -100,7 +100,7 @@ DefaultPlatform::~DefaultPlatform() {
|
||||
}
|
||||
|
||||
void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
DCHECK_GE(thread_pool_size, 0);
|
||||
if (thread_pool_size < 1) {
|
||||
thread_pool_size = base::SysInfo::NumberOfProcessors() - 1;
|
||||
@ -110,7 +110,7 @@ void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
|
||||
}
|
||||
|
||||
void DefaultPlatform::EnsureBackgroundTaskRunnerInitialized() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (!worker_threads_task_runner_) {
|
||||
worker_threads_task_runner_ =
|
||||
std::make_shared<DefaultWorkerThreadsTaskRunner>(thread_pool_size_);
|
||||
@ -128,7 +128,7 @@ double DefaultTimeFunction() {
|
||||
|
||||
void DefaultPlatform::SetTimeFunctionForTesting(
|
||||
DefaultPlatform::TimeFunction time_function) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
time_function_for_testing_ = time_function;
|
||||
// The time function has to be right after the construction of the platform.
|
||||
DCHECK(foreground_task_runner_map_.empty());
|
||||
@ -139,7 +139,7 @@ bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
|
||||
bool failed_result = wait_for_work == MessageLoopBehavior::kWaitForWork;
|
||||
std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
auto it = foreground_task_runner_map_.find(isolate);
|
||||
if (it == foreground_task_runner_map_.end()) return failed_result;
|
||||
task_runner = it->second;
|
||||
@ -157,7 +157,7 @@ void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
|
||||
DCHECK_EQ(IdleTaskSupport::kEnabled, idle_task_support_);
|
||||
std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (foreground_task_runner_map_.find(isolate) ==
|
||||
foreground_task_runner_map_.end()) {
|
||||
return;
|
||||
@ -176,7 +176,7 @@ void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
|
||||
|
||||
std::shared_ptr<TaskRunner> DefaultPlatform::GetForegroundTaskRunner(
|
||||
v8::Isolate* isolate) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (foreground_task_runner_map_.find(isolate) ==
|
||||
foreground_task_runner_map_.end()) {
|
||||
foreground_task_runner_map_.insert(std::make_pair(
|
||||
|
@ -24,7 +24,7 @@ DefaultWorkerThreadsTaskRunner::~DefaultWorkerThreadsTaskRunner() {
|
||||
}
|
||||
|
||||
void DefaultWorkerThreadsTaskRunner::Terminate() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
terminated_ = true;
|
||||
queue_.Terminate();
|
||||
// Clearing the thread pool lets all worker threads join.
|
||||
@ -32,14 +32,14 @@ void DefaultWorkerThreadsTaskRunner::Terminate() {
|
||||
}
|
||||
|
||||
void DefaultWorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (terminated_) return;
|
||||
queue_.Append(std::move(task));
|
||||
}
|
||||
|
||||
void DefaultWorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
|
||||
double delay_in_seconds) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (terminated_) return;
|
||||
if (delay_in_seconds == 0) {
|
||||
queue_.Append(std::move(task));
|
||||
|
@ -16,13 +16,13 @@ TaskQueue::TaskQueue() : process_queue_semaphore_(0), terminated_(false) {}
|
||||
|
||||
|
||||
TaskQueue::~TaskQueue() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
DCHECK(terminated_);
|
||||
DCHECK(task_queue_.empty());
|
||||
}
|
||||
|
||||
void TaskQueue::Append(std::unique_ptr<Task> task) {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
DCHECK(!terminated_);
|
||||
task_queue_.push(std::move(task));
|
||||
process_queue_semaphore_.Signal();
|
||||
@ -31,7 +31,7 @@ void TaskQueue::Append(std::unique_ptr<Task> task) {
|
||||
std::unique_ptr<Task> TaskQueue::GetNext() {
|
||||
for (;;) {
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (!task_queue_.empty()) {
|
||||
std::unique_ptr<Task> result = std::move(task_queue_.front());
|
||||
task_queue_.pop();
|
||||
@ -48,7 +48,7 @@ std::unique_ptr<Task> TaskQueue::GetNext() {
|
||||
|
||||
|
||||
void TaskQueue::Terminate() {
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
DCHECK(!terminated_);
|
||||
terminated_ = true;
|
||||
process_queue_semaphore_.Signal();
|
||||
@ -57,7 +57,7 @@ void TaskQueue::Terminate() {
|
||||
void TaskQueue::BlockUntilQueueEmptyForTesting() {
|
||||
for (;;) {
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&lock_);
|
||||
base::MutexGuard guard(&lock_);
|
||||
if (task_queue_.empty()) return;
|
||||
}
|
||||
base::OS::Sleep(base::TimeDelta::FromMilliseconds(5));
|
||||
|
@ -16,7 +16,7 @@ TraceBufferRingBuffer::TraceBufferRingBuffer(size_t max_chunks,
|
||||
}
|
||||
|
||||
TraceObject* TraceBufferRingBuffer::AddTraceEvent(uint64_t* handle) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
if (is_empty_ || chunks_[chunk_index_]->IsFull()) {
|
||||
chunk_index_ = is_empty_ ? 0 : NextChunkIndex(chunk_index_);
|
||||
is_empty_ = false;
|
||||
@ -35,7 +35,7 @@ TraceObject* TraceBufferRingBuffer::AddTraceEvent(uint64_t* handle) {
|
||||
}
|
||||
|
||||
TraceObject* TraceBufferRingBuffer::GetEventByHandle(uint64_t handle) {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
size_t chunk_index, event_index;
|
||||
uint32_t chunk_seq;
|
||||
ExtractHandle(handle, &chunk_index, &chunk_seq, &event_index);
|
||||
@ -46,7 +46,7 @@ TraceObject* TraceBufferRingBuffer::GetEventByHandle(uint64_t handle) {
|
||||
}
|
||||
|
||||
bool TraceBufferRingBuffer::Flush() {
|
||||
base::LockGuard<base::Mutex> guard(&mutex_);
|
||||
base::MutexGuard guard(&mutex_);
|
||||
// This flushes all the traces stored in the buffer.
|
||||
if (!is_empty_) {
|
||||
for (size_t i = NextChunkIndex(chunk_index_);; i = NextChunkIndex(i)) {
|
||||
|
@ -47,7 +47,7 @@ TracingController::~TracingController() {
|
||||
|
||||
{
|
||||
// Free memory for category group names allocated via strdup.
|
||||
base::LockGuard<base::Mutex> lock(mutex_.get());
|
||||
base::MutexGuard lock(mutex_.get());
|
||||
for (size_t i = g_category_index - 1; i >= g_num_builtin_categories; --i) {
|
||||
const char* group = g_category_groups[i];
|
||||
g_category_groups[i] = nullptr;
|
||||
@ -143,7 +143,7 @@ void TracingController::StartTracing(TraceConfig* trace_config) {
|
||||
trace_config_.reset(trace_config);
|
||||
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
|
||||
{
|
||||
base::LockGuard<base::Mutex> lock(mutex_.get());
|
||||
base::MutexGuard lock(mutex_.get());
|
||||
mode_ = RECORDING_MODE;
|
||||
UpdateCategoryGroupEnabledFlags();
|
||||
observers_copy = observers_;
|
||||
@ -162,7 +162,7 @@ void TracingController::StopTracing() {
|
||||
UpdateCategoryGroupEnabledFlags();
|
||||
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
|
||||
{
|
||||
base::LockGuard<base::Mutex> lock(mutex_.get());
|
||||
base::MutexGuard lock(mutex_.get());
|
||||
observers_copy = observers_;
|
||||
}
|
||||
for (auto o : observers_copy) {
|
||||
@ -213,7 +213,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
|
||||
}
|
||||
|
||||
// Slow path. Grab the lock.
|
||||
base::LockGuard<base::Mutex> lock(mutex_.get());
|
||||
base::MutexGuard lock(mutex_.get());
|
||||
|
||||
// Check the list again with lock in hand.
|
||||
unsigned char* category_group_enabled = nullptr;
|
||||
@ -251,7 +251,7 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
|
||||
void TracingController::AddTraceStateObserver(
|
||||
v8::TracingController::TraceStateObserver* observer) {
|
||||
{
|
||||
base::LockGuard<base::Mutex> lock(mutex_.get());
|
||||
base::MutexGuard lock(mutex_.get());
|
||||
observers_.insert(observer);
|
||||
if (mode_ != RECORDING_MODE) return;
|
||||
}
|
||||
@ -261,7 +261,7 @@ void TracingController::AddTraceStateObserver(
|
||||
|
||||
void TracingController::RemoveTraceStateObserver(
|
||||
v8::TracingController::TraceStateObserver* observer) {
|
||||
base::LockGuard<base::Mutex> lock(mutex_.get());
|
||||
base::MutexGuard lock(mutex_.get());
|
||||
DCHECK(observers_.find(observer) != observers_.end());
|
||||
observers_.erase(observer);
|
||||
}
|
||||
|
@ -385,17 +385,17 @@ class SignalHandler {
|
||||
}
|
||||
|
||||
static void IncreaseSamplerCount() {
|
||||
base::LockGuard<base::Mutex> lock_guard(mutex_);
|
||||
base::MutexGuard lock_guard(mutex_);
|
||||
if (++client_count_ == 1) Install();
|
||||
}
|
||||
|
||||
static void DecreaseSamplerCount() {
|
||||
base::LockGuard<base::Mutex> lock_guard(mutex_);
|
||||
base::MutexGuard lock_guard(mutex_);
|
||||
if (--client_count_ == 0) Restore();
|
||||
}
|
||||
|
||||
static bool Installed() {
|
||||
base::LockGuard<base::Mutex> lock_guard(mutex_);
|
||||
base::MutexGuard lock_guard(mutex_);
|
||||
return signal_handler_installed_;
|
||||
}
|
||||
|
||||
|
@ -46,7 +46,7 @@ inline void LockedQueue<Record>::Enqueue(const Record& record) {
|
||||
CHECK_NOT_NULL(n);
|
||||
n->value = record;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&tail_mutex_);
|
||||
base::MutexGuard guard(&tail_mutex_);
|
||||
tail_->next.SetValue(n);
|
||||
tail_ = n;
|
||||
}
|
||||
@ -57,7 +57,7 @@ template <typename Record>
|
||||
inline bool LockedQueue<Record>::Dequeue(Record* record) {
|
||||
Node* old_head = nullptr;
|
||||
{
|
||||
base::LockGuard<base::Mutex> guard(&head_mutex_);
|
||||
base::MutexGuard guard(&head_mutex_);
|
||||
old_head = head_;
|
||||
Node* const next_node = head_->next.Value();
|
||||
if (next_node == nullptr) return false;
|
||||
@ -71,14 +71,14 @@ inline bool LockedQueue<Record>::Dequeue(Record* record) {
|
||||
|
||||
template <typename Record>
|
||||
inline bool LockedQueue<Record>::IsEmpty() const {
|
||||
base::LockGuard<base::Mutex> guard(&head_mutex_);
|
||||
base::MutexGuard guard(&head_mutex_);
|
||||
return head_->next.Value() == nullptr;
|
||||
}
|
||||
|
||||
|
||||
template <typename Record>
|
||||
inline bool LockedQueue<Record>::Peek(Record* record) const {
|
||||
base::LockGuard<base::Mutex> guard(&head_mutex_);
|
||||
base::MutexGuard guard(&head_mutex_);
|
||||
Node* const next_node = head_->next.Value();
|
||||
if (next_node == nullptr) return false;
|
||||
*record = next_node->value;
|
||||
|
@ -97,7 +97,7 @@ class Log {
|
||||
void AppendRawCharacter(const char character);
|
||||
|
||||
Log* log_;
|
||||
base::LockGuard<base::Mutex> lock_guard_;
|
||||
base::MutexGuard lock_guard_;
|
||||
};
|
||||
|
||||
private:
|
||||
|
@ -687,7 +687,7 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
|
||||
}
|
||||
|
||||
void JitLogger::CodeMoveEvent(AbstractCode* from, AbstractCode* to) {
|
||||
base::LockGuard<base::Mutex> guard(&logger_mutex_);
|
||||
base::MutexGuard guard(&logger_mutex_);
|
||||
|
||||
JitCodeEvent event;
|
||||
event.type = JitCodeEvent::CODE_MOVED;
|
||||
|
@ -871,7 +871,7 @@ void Simulator::TrashCallerSaveRegisters() {
|
||||
}
|
||||
|
||||
int Simulator::WriteExDW(intptr_t addr, uint64_t value, Instruction* instr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
|
||||
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
||||
addr, &global_monitor_processor_)) {
|
||||
@ -884,7 +884,7 @@ int Simulator::WriteExDW(intptr_t addr, uint64_t value, Instruction* instr) {
|
||||
}
|
||||
|
||||
uint64_t Simulator::ReadExDWU(intptr_t addr, Instruction* instr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
|
||||
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -895,14 +895,14 @@ uint64_t Simulator::ReadExDWU(intptr_t addr, Instruction* instr) {
|
||||
uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
|
||||
return *ptr;
|
||||
}
|
||||
|
||||
uint32_t Simulator::ReadExWU(intptr_t addr, Instruction* instr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
|
||||
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -913,7 +913,7 @@ uint32_t Simulator::ReadExWU(intptr_t addr, Instruction* instr) {
|
||||
int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
|
||||
return *ptr;
|
||||
@ -923,7 +923,7 @@ int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
|
||||
void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -933,7 +933,7 @@ void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
|
||||
}
|
||||
|
||||
int Simulator::WriteExW(intptr_t addr, uint32_t value, Instruction* instr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
|
||||
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
||||
addr, &global_monitor_processor_)) {
|
||||
@ -948,7 +948,7 @@ int Simulator::WriteExW(intptr_t addr, uint32_t value, Instruction* instr) {
|
||||
void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -960,14 +960,14 @@ void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
|
||||
uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
|
||||
return *ptr;
|
||||
}
|
||||
|
||||
uint16_t Simulator::ReadExHU(intptr_t addr, Instruction* instr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
|
||||
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -978,7 +978,7 @@ uint16_t Simulator::ReadExHU(intptr_t addr, Instruction* instr) {
|
||||
int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
|
||||
return *ptr;
|
||||
@ -988,7 +988,7 @@ int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
|
||||
void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -1001,7 +1001,7 @@ void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
|
||||
void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -1011,7 +1011,7 @@ void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
|
||||
}
|
||||
|
||||
int Simulator::WriteExH(intptr_t addr, uint16_t value, Instruction* instr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
|
||||
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
||||
addr, &global_monitor_processor_)) {
|
||||
@ -1026,7 +1026,7 @@ int Simulator::WriteExH(intptr_t addr, uint16_t value, Instruction* instr) {
|
||||
uint8_t Simulator::ReadBU(intptr_t addr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
|
||||
return *ptr;
|
||||
@ -1036,14 +1036,14 @@ uint8_t Simulator::ReadBU(intptr_t addr) {
|
||||
int8_t Simulator::ReadB(intptr_t addr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
|
||||
return *ptr;
|
||||
}
|
||||
|
||||
uint8_t Simulator::ReadExBU(intptr_t addr) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
|
||||
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -1054,7 +1054,7 @@ uint8_t Simulator::ReadExBU(intptr_t addr) {
|
||||
void Simulator::WriteB(intptr_t addr, uint8_t value) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -1066,7 +1066,7 @@ void Simulator::WriteB(intptr_t addr, uint8_t value) {
|
||||
void Simulator::WriteB(intptr_t addr, int8_t value) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -1075,7 +1075,7 @@ void Simulator::WriteB(intptr_t addr, int8_t value) {
|
||||
}
|
||||
|
||||
int Simulator::WriteExB(intptr_t addr, uint8_t value) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
|
||||
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
||||
addr, &global_monitor_processor_)) {
|
||||
@ -1090,7 +1090,7 @@ int Simulator::WriteExB(intptr_t addr, uint8_t value) {
|
||||
intptr_t* Simulator::ReadDW(intptr_t addr) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyLoad(addr);
|
||||
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
|
||||
return ptr;
|
||||
@ -1100,7 +1100,7 @@ intptr_t* Simulator::ReadDW(intptr_t addr) {
|
||||
void Simulator::WriteDW(intptr_t addr, int64_t value) {
|
||||
// All supported PPC targets allow unaligned accesses, so we don't need to
|
||||
// check the alignment here.
|
||||
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
base::MutexGuard lock_guard(&global_monitor_.Pointer()->mutex);
|
||||
local_monitor_.NotifyStore(addr);
|
||||
global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
||||
&global_monitor_processor_);
|
||||
@ -4332,7 +4332,7 @@ void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
|
||||
}
|
||||
|
||||
void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
|
||||
base::LockGuard<base::Mutex> lock_guard(&mutex);
|
||||
base::MutexGuard lock_guard(&mutex);
|
||||
if (!IsProcessorInLinkedList_Locked(processor)) {
|
||||
return;
|
||||
}
|
||||
|
@ -254,12 +254,12 @@ namespace {
|
||||
class CpuProfilersManager {
|
||||
public:
|
||||
void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
|
||||
base::LockGuard<base::Mutex> lock(&mutex_);
|
||||
base::MutexGuard lock(&mutex_);
|
||||
profilers_.emplace(isolate, profiler);
|
||||
}
|
||||
|
||||
void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
|
||||
base::LockGuard<base::Mutex> lock(&mutex_);
|
||||
base::MutexGuard lock(&mutex_);
|
||||
auto range = profilers_.equal_range(isolate);
|
||||
for (auto it = range.first; it != range.second; ++it) {
|
||||
if (it->second != profiler) continue;
|
||||
@ -270,7 +270,7 @@ class CpuProfilersManager {
|
||||
}
|
||||
|
||||
void CallCollectSample(Isolate* isolate) {
|
||||
base::LockGuard<base::Mutex> lock(&mutex_);
|
||||
base::MutexGuard lock(&mutex_);
|
||||
auto range = profilers_.equal_range(isolate);
|
||||
for (auto it = range.first; it != range.second; ++it) {
|
||||
it->second->CollectSample();
|
||||
|
@ -178,7 +178,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
|
||||
}
|
||||
|
||||
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
|
||||
base::LockGuard<base::Mutex> guard(&profiler_mutex_);
|
||||
base::MutexGuard guard(&profiler_mutex_);
|
||||
bool known_object = ids_->MoveObject(from, to, size);
|
||||
if (!known_object && allocation_tracker_) {
|
||||
allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
|
||||
|
@ -40,7 +40,7 @@ void TracingCpuProfilerImpl::OnTraceEnabled() {
|
||||
}
|
||||
|
||||
void TracingCpuProfilerImpl::OnTraceDisabled() {
|
||||
base::LockGuard<base::Mutex> lock(&mutex_);
|
||||
base::MutexGuard lock(&mutex_);
|
||||
if (!profiling_enabled_) return;
|
||||
profiling_enabled_ = false;
|
||||
isolate_->RequestInterrupt(
|
||||
@ -51,7 +51,7 @@ void TracingCpuProfilerImpl::OnTraceDisabled() {
|
||||
}
|
||||
|
||||
void TracingCpuProfilerImpl::StartProfiling() {
|
||||
base::LockGuard<base::Mutex> lock(&mutex_);
|
||||
base::MutexGuard lock(&mutex_);
|
||||
if (!profiling_enabled_ || profiler_) return;
|
||||
bool enabled;
|
||||
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
|
||||
@ -64,7 +64,7 @@ void TracingCpuProfilerImpl::StartProfiling() {
|
||||
}
|
||||
|
||||
void TracingCpuProfilerImpl::StopProfiling() {
|
||||
base::LockGuard<base::Mutex> lock(&mutex_);
|
||||
base::MutexGuard lock(&mutex_);
|
||||
if (!profiler_) return;
|
||||
profiler_->StopProfiling("");
|
||||
profiler_.reset();
|
||||
|
@ -45,7 +45,7 @@ base::LazyInstance<base::Mutex>::type g_PerIsolateWasmControlsMutex =
|
||||
|
||||
bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
|
||||
bool is_async) {
|
||||
base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
|
||||
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
|
||||
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
|
||||
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
|
||||
return (is_async && ctrls.AllowAnySizeForAsync) ||
|
||||
@ -58,7 +58,7 @@ bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
|
||||
bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
|
||||
v8::Local<v8::Value> module_or_bytes,
|
||||
bool is_async) {
|
||||
base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
|
||||
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
|
||||
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
|
||||
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
|
||||
if (is_async && ctrls.AllowAnySizeForAsync) return true;
|
||||
@ -481,7 +481,7 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
|
||||
CHECK_EQ(args.length(), 2);
|
||||
CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
|
||||
CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
|
||||
base::LockGuard<base::Mutex> guard(g_PerIsolateWasmControlsMutex.Pointer());
|
||||
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
|
||||
WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
|
||||
ctrl.AllowAnySizeForAsync = allow_async;
|
||||
ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
|
||||
|
@ -61,7 +61,7 @@ void SimulatorBase::GlobalTearDown() {
|
||||
// static
|
||||
Address SimulatorBase::RedirectExternalReference(Address external_function,
|
||||
ExternalReference::Type type) {
|
||||
base::LockGuard<base::Mutex> lock_guard(Simulator::redirection_mutex());
|
||||
base::MutexGuard lock_guard(Simulator::redirection_mutex());
|
||||
Redirection* redirection = Redirection::Get(external_function, type);
|
||||
return redirection->address_of_instruction();
|
||||
}
|
||||
@ -70,7 +70,7 @@ Redirection::Redirection(Address external_function,
|
||||
ExternalReference::Type type)
|
||||
: external_function_(external_function), type_(type), next_(nullptr) {
|
||||
next_ = Simulator::redirection();
|
||||
base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
|
||||
base::MutexGuard lock_guard(Simulator::i_cache_mutex());
|
||||
Simulator::SetRedirectInstruction(
|
||||
reinterpret_cast<Instruction*>(address_of_instruction()));
|
||||
Simulator::FlushICache(Simulator::i_cache(),
|
||||
|
@ -23,8 +23,7 @@ static base::LazyMutex external_startup_data_mutex = LAZY_MUTEX_INITIALIZER;
static v8::StartupData external_startup_blob = {nullptr, 0};

void SetSnapshotFromFile(StartupData* snapshot_blob) {
base::LockGuard<base::Mutex> lock_guard(
external_startup_data_mutex.Pointer());
base::MutexGuard lock_guard(external_startup_data_mutex.Pointer());
DCHECK(snapshot_blob);
DCHECK(snapshot_blob->data);
DCHECK_GT(snapshot_blob->raw_size, 0);

@ -35,8 +34,7 @@ void SetSnapshotFromFile(StartupData* snapshot_blob) {

const v8::StartupData* Snapshot::DefaultSnapshotBlob() {
base::LockGuard<base::Mutex> lock_guard(
external_startup_data_mutex.Pointer());
base::MutexGuard lock_guard(external_startup_data_mutex.Pointer());
return &external_startup_blob;
}
} // namespace internal
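The two snapshot functions above guard a lazily initialized mutex through its Pointer() accessor rather than a plain member. A hedged sketch of that usage, assuming (as the lines above suggest) that base::LazyMutex is initialized with LAZY_MUTEX_INITIALIZER and that Pointer() returns a base::Mutex* suitable for MutexGuard:

// Sketch only: lock a lazily constructed mutex the same way the hunk above does.
static base::LazyMutex blob_mutex = LAZY_MUTEX_INITIALIZER;

void TouchBlob() {
  base::MutexGuard lock_guard(blob_mutex.Pointer());  // Pointer() lazily creates the mutex
  // ... read or update the data protected by blob_mutex here ...
}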
@ -104,7 +104,7 @@ class CompilationState {
Isolate* isolate() const { return isolate_; }

bool failed() const {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
return failed_;
}

@ -2860,7 +2860,7 @@ void CompilationState::AddCompilationUnits(
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units) {
{
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);

if (compile_mode_ == CompileMode::kTiering) {
DCHECK_EQ(baseline_units.size(), tiering_units.size());
@ -2884,7 +2884,7 @@ void CompilationState::AddCompilationUnits(

std::unique_ptr<WasmCompilationUnit>
CompilationState::GetNextCompilationUnit() {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);

std::vector<std::unique_ptr<WasmCompilationUnit>>& units =
baseline_compilation_units_.empty() ? tiering_compilation_units_
@ -2900,7 +2900,7 @@ CompilationState::GetNextCompilationUnit() {
}

std::unique_ptr<WasmCompilationUnit> CompilationState::GetNextExecutedUnit() {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
std::vector<std::unique_ptr<WasmCompilationUnit>>& units = finish_units();
if (units.empty()) return {};
std::unique_ptr<WasmCompilationUnit> ret = std::move(units.back());
@ -2909,7 +2909,7 @@ std::unique_ptr<WasmCompilationUnit> CompilationState::GetNextExecutedUnit() {
}

bool CompilationState::HasCompilationUnitToFinish() {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
return !finish_units().empty();
}

@ -2952,7 +2952,7 @@ void CompilationState::OnFinishedUnit() {

void CompilationState::ScheduleUnitForFinishing(
std::unique_ptr<WasmCompilationUnit> unit, ExecutionTier mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
if (compile_mode_ == CompileMode::kTiering &&
mode == ExecutionTier::kOptimized) {
tiering_finish_units_.push_back(std::move(unit));
@ -2968,7 +2968,7 @@ void CompilationState::ScheduleUnitForFinishing(
}

void CompilationState::OnBackgroundTaskStopped(const WasmFeatures& detected) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
DCHECK_LE(1, num_background_tasks_);
--num_background_tasks_;
UnionFeaturesInto(&detected_features_, detected);
@ -2979,7 +2979,7 @@ void CompilationState::PublishDetectedFeatures(Isolate* isolate,
// Notifying the isolate of the feature counts must take place under
// the mutex, because even if we have finished baseline compilation,
// tiering compilations may still occur in the background.
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
UnionFeaturesInto(&detected_features_, detected);
UpdateFeatureUseCounts(isolate, detected_features_);
}
@ -2987,7 +2987,7 @@ void CompilationState::PublishDetectedFeatures(Isolate* isolate,
void CompilationState::RestartBackgroundTasks(size_t max) {
size_t num_restart;
{
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
// No need to restart tasks if compilation already failed.
if (failed_) return;

@ -3015,7 +3015,7 @@ void CompilationState::RestartBackgroundTasks(size_t max) {
}

bool CompilationState::SetFinisherIsRunning(bool value) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
if (finisher_is_running_ == value) return false;
finisher_is_running_ = value;
return true;
@ -3028,7 +3028,7 @@ void CompilationState::ScheduleFinisherTask() {

void CompilationState::Abort() {
{
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
failed_ = true;
}
background_task_manager_.CancelAndWait();

@ -397,7 +397,7 @@ WasmCode* NativeModule::AddOwnedCode(
{
// Both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
base::MutexGuard lock(&allocation_mutex_);
Vector<byte> executable_buffer = AllocateForCode(instructions.size());
// Ownership will be transferred to {owned_code_} below.
code = new WasmCode(this, index, executable_buffer, stack_slots,
@ -430,7 +430,7 @@ WasmCode* NativeModule::AddOwnedCode(
WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
ret->index_ = index;
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
base::MutexGuard lock(&allocation_mutex_);
InstallCode(ret);
return ret;
}
@ -597,7 +597,7 @@ WasmCode* NativeModule::AddDeserializedCode(
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
}
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
base::MutexGuard lock(&allocation_mutex_);
InstallCode(code);
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@ -605,7 +605,7 @@ WasmCode* NativeModule::AddDeserializedCode(
}

void NativeModule::PublishCode(WasmCode* code) {
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
base::MutexGuard lock(&allocation_mutex_);
// Skip publishing code if there is an active redirection to the interpreter
// for the given function index, in order to preserve the redirection.
if (has_code(code->index()) &&
@ -619,7 +619,7 @@ void NativeModule::PublishCode(WasmCode* code) {
}

std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
base::MutexGuard lock(&allocation_mutex_);
std::vector<WasmCode*> result;
result.reserve(code_table().size());
for (WasmCode* code : code_table()) result.push_back(code);
@ -741,7 +741,7 @@ Vector<byte> NativeModule::AllocateForCode(size_t size) {
}

WasmCode* NativeModule::Lookup(Address pc) const {
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
base::MutexGuard lock(&allocation_mutex_);
if (owned_code_.empty()) return nullptr;
auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
WasmCodeUniquePtrComparator());
@ -836,13 +836,13 @@ bool WasmCodeManager::Commit(Address start, size_t size) {

void WasmCodeManager::AssignRanges(Address start, Address end,
NativeModule* native_module) {
base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
}

void WasmCodeManager::AssignRangesAndAddModule(Address start, Address end,
NativeModule* native_module) {
base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
base::MutexGuard lock(&native_modules_mutex_);
lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
native_modules_.emplace(native_module);
}
@ -870,7 +870,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
}

void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
base::MutexGuard lock(&native_modules_mutex_);
for (NativeModule* native_module : native_modules_) {
int code_size =
static_cast<int>(native_module->committed_code_space_.load() / MB);
@ -917,7 +917,7 @@ size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
}

bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
base::MutexGuard lock(&native_modules_mutex_);
// TODO(titzer): we force a critical memory pressure notification
// when the code space is almost exhausted, but only upon the next module
// creation. This is only for one isolate, and it should really do this for
@ -1017,7 +1017,7 @@ bool NativeModule::SetExecutable(bool executable) {
}

void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
base::MutexGuard lock(&native_modules_mutex_);
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_.erase(native_module);
TRACE_HEAP("Freeing NativeModule %p\n", this);
@ -1040,7 +1040,7 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
}

NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
base::MutexGuard lock(&native_modules_mutex_);
if (lookup_map_.empty()) return nullptr;

auto iter = lookup_map_.upper_bound(pc);

@ -206,7 +206,7 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
}

CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
if (compilation_stats_ == nullptr) {
compilation_stats_.reset(new CompilationStatistics());
}
@ -214,7 +214,7 @@ CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
}

void WasmEngine::DumpAndResetTurboStatistics() {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
if (compilation_stats_ != nullptr) {
StdoutStream os;
os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
@ -223,7 +223,7 @@ void WasmEngine::DumpAndResetTurboStatistics() {
}

CodeTracer* WasmEngine::GetCodeTracer() {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
if (code_tracer_ == nullptr) code_tracer_.reset(new CodeTracer(-1));
return code_tracer_.get();
}
@ -236,14 +236,14 @@ AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
context, std::move(resolver));
// Pass ownership to the unique_ptr in {jobs_}.
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
return job;
}

std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
AsyncCompileJob* job) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
auto item = jobs_.find(job);
DCHECK(item != jobs_.end());
std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
@ -252,7 +252,7 @@ std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
}

bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
for (auto& entry : jobs_) {
if (entry.first->isolate() == isolate) return true;
@ -261,7 +261,7 @@ bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
}

void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
for (auto it = jobs_.begin(); it != jobs_.end();) {
if (it->first->isolate() == isolate) {
@ -273,13 +273,13 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
}

void WasmEngine::AddIsolate(Isolate* isolate) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.insert(isolate);
}

void WasmEngine::RemoveIsolate(Isolate* isolate) {
base::LockGuard<base::Mutex> guard(&mutex_);
base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
isolates_.erase(isolate);
}

@ -22,7 +22,7 @@ class WasmImportWrapperCache {
WasmCode* GetOrCompile(Isolate* isolate, compiler::WasmImportCallKind kind,
FunctionSig* sig) {
// TODO(titzer): remove the isolate parameter.
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
CacheKey key(static_cast<uint8_t>(kind), *sig);
WasmCode*& cached = entry_map_[key];
if (cached == nullptr) {

@ -179,7 +179,7 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
size_t allocation_length,
void* buffer_start,
size_t buffer_length) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
base::MutexGuard scope_lock(&mutex_);

allocated_address_space_ += allocation_length;
AddAddressSpaceSample(isolate);
@ -191,7 +191,7 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,

WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
Isolate* isolate, const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
base::MutexGuard scope_lock(&mutex_);

auto find_result = allocations_.find(buffer_start);
CHECK_NE(find_result, allocations_.end());
@ -216,7 +216,7 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(

const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
base::MutexGuard scope_lock(&mutex_);
const auto& result = allocations_.find(buffer_start);
if (result != allocations_.end()) {
return &result->second;
@ -225,12 +225,12 @@ const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
}

bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
base::MutexGuard scope_lock(&mutex_);
return allocations_.find(buffer_start) != allocations_.end();
}

bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
base::MutexGuard scope_lock(&mutex_);
const auto allocation = allocations_.find(buffer_start);

if (allocation == allocations_.end()) {

@ -43,7 +43,7 @@ void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
(size_t(1) << kMinSegmentSizePower);
size_t fits_fully = max_pool_size / full_size;

base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
base::MutexGuard lock_guard(&unused_segments_mutex_);

// We assume few zones (less than 'fits_fully' many) to be active at the same
// time. When zones grow regularly, they will keep requesting segments of
@ -138,7 +138,7 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {

Segment* segment;
{
base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
base::MutexGuard lock_guard(&unused_segments_mutex_);

segment = unused_segments_heads_[power];

@ -173,7 +173,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
power -= kMinSegmentSizePower;

{
base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
base::MutexGuard lock_guard(&unused_segments_mutex_);

if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
return false;
@ -189,7 +189,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
}

void AccountingAllocator::ClearPool() {
base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
base::MutexGuard lock_guard(&unused_segments_mutex_);

for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
power++) {

@ -292,7 +292,7 @@ class MemoryAccessThread : public v8::base::Thread {
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate_);
{
v8::Isolate::Scope scope(isolate_);
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
v8::base::MutexGuard lock_guard(&mutex_);
while (!is_finished_) {
while (!(has_request_ || is_finished_)) {
has_request_cv_.Wait(&mutex_);
@ -313,7 +313,7 @@ class MemoryAccessThread : public v8::base::Thread {

void NextAndWait(TestData* test_data, MemoryAccess access) {
DCHECK(!has_request_);
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
v8::base::MutexGuard lock_guard(&mutex_);
test_data_ = test_data;
access_ = access;
has_request_ = true;
@ -325,7 +325,7 @@ class MemoryAccessThread : public v8::base::Thread {
}

void Finish() {
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
v8::base::MutexGuard lock_guard(&mutex_);
is_finished_ = true;
has_request_cv_.NotifyOne();
}

@ -303,7 +303,7 @@ class MemoryAccessThread : public v8::base::Thread {
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate_);
{
v8::Isolate::Scope scope(isolate_);
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
v8::base::MutexGuard lock_guard(&mutex_);
while (!is_finished_) {
while (!(has_request_ || is_finished_)) {
has_request_cv_.Wait(&mutex_);
@ -324,7 +324,7 @@ class MemoryAccessThread : public v8::base::Thread {

void NextAndWait(TestData* test_data, MemoryAccess access) {
DCHECK(!has_request_);
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
v8::base::MutexGuard lock_guard(&mutex_);
test_data_ = test_data;
access_ = access;
has_request_ = true;
@ -336,7 +336,7 @@ class MemoryAccessThread : public v8::base::Thread {
}

void Finish() {
v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_);
v8::base::MutexGuard lock_guard(&mutex_);
is_finished_ = true;
has_request_cv_.NotifyOne();
}

@ -16,7 +16,7 @@ TEST(ConditionVariable, WaitForAfterNofityOnSameThread) {
Mutex mutex;
ConditionVariable cv;

LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);

cv.NotifyOne();
EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
@ -37,7 +37,7 @@ class ThreadWithMutexAndConditionVariable final : public Thread {
finished_(false) {}

void Run() override {
LockGuard<Mutex> lock_guard(&mutex_);
MutexGuard lock_guard(&mutex_);
running_ = true;
cv_.NotifyOne();
while (running_) {
@ -61,7 +61,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
ThreadWithMutexAndConditionVariable threads[kThreadCount];

for (int n = 0; n < kThreadCount; ++n) {
LockGuard<Mutex> lock_guard(&threads[n].mutex_);
MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_FALSE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
threads[n].Start();
@ -72,13 +72,13 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
}

for (int n = kThreadCount - 1; n >= 0; --n) {
LockGuard<Mutex> lock_guard(&threads[n].mutex_);
MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
}

for (int n = 0; n < kThreadCount; ++n) {
LockGuard<Mutex> lock_guard(&threads[n].mutex_);
MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
// Tell the nth thread to quit.
@ -88,7 +88,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {

for (int n = kThreadCount - 1; n >= 0; --n) {
// Wait for nth thread to quit.
LockGuard<Mutex> lock_guard(&threads[n].mutex_);
MutexGuard lock_guard(&threads[n].mutex_);
while (!threads[n].finished_) {
threads[n].cv_.Wait(&threads[n].mutex_);
}
@ -98,7 +98,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {

for (int n = 0; n < kThreadCount; ++n) {
threads[n].Join();
LockGuard<Mutex> lock_guard(&threads[n].mutex_);
MutexGuard lock_guard(&threads[n].mutex_);
EXPECT_FALSE(threads[n].running_);
EXPECT_TRUE(threads[n].finished_);
}
@ -117,7 +117,7 @@ class ThreadWithSharedMutexAndConditionVariable final : public Thread {
mutex_(nullptr) {}

void Run() override {
LockGuard<Mutex> lock_guard(mutex_);
MutexGuard lock_guard(mutex_);
running_ = true;
cv_->NotifyAll();
while (running_) {
@ -149,7 +149,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {

// Start all threads.
{
LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
EXPECT_FALSE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
@ -159,7 +159,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {

// Wait for all threads to start.
{
LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
while (!threads[n].running_) {
cv.Wait(&mutex);
@ -169,7 +169,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {

// Make sure that all threads are running.
{
LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
@ -178,7 +178,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {

// Tell all threads to quit.
{
LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
EXPECT_TRUE(threads[n].running_);
EXPECT_FALSE(threads[n].finished_);
@ -190,7 +190,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {

// Wait for all threads to quit.
{
LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);
for (int n = 0; n < kThreadCount; ++n) {
while (!threads[n].finished_) {
cv.Wait(&mutex);
@ -200,7 +200,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {

// Make sure all threads are finished.
{
LockGuard<Mutex> lock_guard(&mutex);
MutexGuard lock_guard(&mutex);
for (int n = kThreadCount - 1; n >= 0; --n) {
EXPECT_FALSE(threads[n].running_);
EXPECT_TRUE(threads[n].finished_);
@ -234,7 +234,7 @@ class LoopIncrementThread final : public Thread {
void Run() override {
int last_count = -1;
while (true) {
LockGuard<Mutex> lock_guard(mutex_);
MutexGuard lock_guard(mutex_);
int count = *counter_;
while (count % thread_count_ != rem_ && count < limit_) {
cv_->Wait(mutex_);

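The condition-variable tests above all use the same shape: take the guard, then Wait() in a loop until the watched flag changes, while the notifier flips the flag under the same mutex. A small self-contained sketch of that shape follows; it uses standard-library types as stand-ins for base::Mutex, base::MutexGuard, and base::ConditionVariable, which is an assumption made purely for illustration.

// Illustration only: the wait-in-a-loop pattern from the tests above, written
// with standard-library stand-ins so it compiles on its own.
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::mutex mutex;               // stand-in for base::Mutex
  std::condition_variable cv;     // stand-in for base::ConditionVariable
  bool running = false;

  std::thread worker([&] {
    std::unique_lock<std::mutex> guard(mutex);  // stand-in for base::MutexGuard
    running = true;   // flip the flag under the lock
    cv.notify_one();  // then wake the waiter
  });

  {
    std::unique_lock<std::mutex> guard(mutex);
    // Loop around the wait, exactly like the tests: spurious wakeups and an
    // early notification are both handled by re-checking the predicate.
    while (!running) cv.wait(guard);
  }
  worker.join();
  return 0;
}
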
@ -11,8 +11,8 @@ namespace base {

TEST(Mutex, LockGuardMutex) {
Mutex mutex;
{ LockGuard<Mutex> lock_guard(&mutex); }
{ LockGuard<Mutex> lock_guard(&mutex); }
{ MutexGuard lock_guard(&mutex); }
{ MutexGuard lock_guard(&mutex); }
}

@ -28,8 +28,8 @@ TEST(Mutex, LockGuardRecursiveMutex) {

TEST(Mutex, LockGuardLazyMutex) {
LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
{ LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
{ LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
{ MutexGuard lock_guard(lazy_mutex.Pointer()); }
{ MutexGuard lock_guard(lazy_mutex.Pointer()); }
}

@ -112,7 +112,7 @@ class MockPlatform : public v8::Platform {
sem_(0),
tracing_controller_(V8::GetCurrentPlatform()->GetTracingController()) {}
~MockPlatform() override {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
EXPECT_TRUE(foreground_tasks_.empty());
EXPECT_TRUE(worker_tasks_.empty());
EXPECT_TRUE(idle_task_ == nullptr);
@ -126,7 +126,7 @@ class MockPlatform : public v8::Platform {
}

void CallOnWorkerThread(std::unique_ptr<Task> task) override {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
worker_tasks_.push_back(std::move(task));
}

@ -136,7 +136,7 @@ class MockPlatform : public v8::Platform {
}

void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
foreground_tasks_.push_back(std::unique_ptr<Task>(task));
}

@ -147,7 +147,7 @@ class MockPlatform : public v8::Platform {

void CallIdleOnForegroundThread(v8::Isolate* isolate,
IdleTask* task) override {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
ASSERT_TRUE(idle_task_ == nullptr);
idle_task_ = task;
}
@ -171,7 +171,7 @@ class MockPlatform : public v8::Platform {
time_step_ = time_step;
IdleTask* task;
{
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
task = idle_task_;
ASSERT_TRUE(idle_task_ != nullptr);
idle_task_ = nullptr;
@ -181,24 +181,24 @@ class MockPlatform : public v8::Platform {
}

bool IdleTaskPending() {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
return idle_task_;
}

bool WorkerTasksPending() {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
return !worker_tasks_.empty();
}

bool ForegroundTasksPending() {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
return !foreground_tasks_.empty();
}

void RunWorkerTasksAndBlock(Platform* platform) {
std::vector<std::unique_ptr<Task>> tasks;
{
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
@ -209,7 +209,7 @@ class MockPlatform : public v8::Platform {
void RunWorkerTasks(Platform* platform) {
std::vector<std::unique_ptr<Task>> tasks;
{
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
@ -219,7 +219,7 @@ class MockPlatform : public v8::Platform {
void RunForegroundTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
tasks.swap(foreground_tasks_);
}
for (auto& task : tasks) {
@ -232,7 +232,7 @@ class MockPlatform : public v8::Platform {
void ClearWorkerTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
}
@ -240,13 +240,13 @@ class MockPlatform : public v8::Platform {
void ClearForegroundTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
tasks.swap(foreground_tasks_);
}
}

void ClearIdleTask() {
base::LockGuard<base::Mutex> lock(&mutex_);
base::MutexGuard lock(&mutex_);
ASSERT_TRUE(idle_task_ != nullptr);
delete idle_task_;
idle_task_ = nullptr;
@ -283,7 +283,7 @@ class MockPlatform : public v8::Platform {
: platform_(platform) {}

void PostTask(std::unique_ptr<v8::Task> task) override {
base::LockGuard<base::Mutex> lock(&platform_->mutex_);
base::MutexGuard lock(&platform_->mutex_);
platform_->foreground_tasks_.push_back(std::move(task));
}

@ -294,7 +294,7 @@ class MockPlatform : public v8::Platform {

void PostIdleTask(std::unique_ptr<IdleTask> task) override {
DCHECK(IdleTasksEnabled());
base::LockGuard<base::Mutex> lock(&platform_->mutex_);
base::MutexGuard lock(&platform_->mutex_);
ASSERT_TRUE(platform_->idle_task_ == nullptr);
platform_->idle_task_ = task.release();
}

@ -103,7 +103,7 @@ class CountingThread final : public base::Thread {

private:
void ProcessWork() {
base::LockGuard<base::Mutex> guard(mutex_);
base::MutexGuard guard(mutex_);
processed_work_ += *work_;
*work_ = 0;
}
@ -129,7 +129,7 @@ TEST(OneshotBarrier, Processing_Concurrent) {

for (size_t i = 0; i < kWorkCounter; i++) {
{
base::LockGuard<base::Mutex> guard(&mutex);
base::MutexGuard guard(&mutex);
work++;
}
barrier.NotifyAll();