Clean up ThreadId
The {id_} stored in {ThreadId} should not be atomic. Only getting a new id
for the current thread needs to be atomic. If any user of {ThreadId} needs
atomicity, that user should wrap {ThreadId} in a {std::atomic} instead.

Drive-by: Remove {Equals} method, use {operator==} instead.
Drive-by: Move static methods after member methods.

R=ishell@chromium.org
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel
Bug: v8:8834
Change-Id: Id0470eb2fa907948843ac1153e2dc5dcd9a8fbc8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1494006
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60146}
This commit is contained in: parent 4c7cabb1d8, commit 656254b17b
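Before the diff itself, a minimal sketch of the idea behind the change (simplified and illustrative only; it mirrors the names in the diff below, but the id-allocation scheme shown in Current() is an assumption for illustration, not the actual V8 implementation in thread-id.cc):

#include <atomic>

namespace sketch {

// A plain value type: comparisons use operator==, and nothing inside the
// class is atomic.
class ThreadId {
 public:
  constexpr ThreadId() noexcept : ThreadId(kInvalidId) {}

  bool operator==(const ThreadId& other) const { return id_ == other.id_; }
  bool operator!=(const ThreadId& other) const { return id_ != other.id_; }

  bool IsValid() const { return id_ != kInvalidId; }
  constexpr int ToInteger() const { return id_; }

  // Only handing out a fresh id needs atomicity; the stored id_ does not.
  // (Illustrative counter only; the real allocation is not part of this diff.)
  static ThreadId Current() {
    static std::atomic<int> next_id{1};
    thread_local int my_id = next_id.fetch_add(1, std::memory_order_relaxed);
    return ThreadId(my_id);
  }

  static constexpr ThreadId Invalid() { return ThreadId(kInvalidId); }

 private:
  static constexpr int kInvalidId = -1;
  explicit constexpr ThreadId(int id) noexcept : id_(id) {}
  int id_;  // plain int; callers that need atomicity wrap ThreadId in std::atomic
};

}  // namespace sketch

With this shape, equality checks such as DCHECK_EQ(ThreadId::Current(), isolate->thread_id()) work directly, and any field that genuinely needs atomicity is declared as std::atomic<ThreadId> by its owner, as the v8threads.h hunks below do for mutex_owner_.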
@@ -8696,7 +8696,7 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
   bool on_isolate_thread =
       v8::Locker::IsActive()
           ? isolate->thread_manager()->IsLockedByCurrentThread()
-          : i::ThreadId::Current().Equals(isolate->thread_id());
+          : i::ThreadId::Current() == isolate->thread_id();
   isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
 }
@@ -186,7 +186,7 @@ AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
     : zone_(isolate->allocator(), ZONE_NAME),
       string_table_(AstRawString::Compare),
       hash_seed_(hash_seed) {
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
 #define F(name, str) \
   { \
     const char* data = str; \
@@ -36,7 +36,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
             isolate_->heap()->builtins_constants_table());
 
   // Must be on the main thread.
-  DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
 
   // Must be generating embedded builtin code.
   DCHECK(isolate_->IsGeneratingEmbeddedBuiltins());
@@ -149,7 +149,7 @@ CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() {
 
 CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
     Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
   DisallowCodeDependencyChange no_dependency_change;
   DisallowJavascriptExecution no_js(isolate);
 
@@ -200,7 +200,7 @@ void UnoptimizedCompilationJob::RecordFunctionCompilation(
 // Implementation of OptimizedCompilationJob
 
 CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
   DisallowJavascriptExecution no_js(isolate);
 
   if (FLAG_trace_opt && compilation_info()->IsOptimizing()) {
@@ -226,7 +226,7 @@ CompilationJob::Status OptimizedCompilationJob::ExecuteJob() {
 }
 
 CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
   DisallowJavascriptExecution no_js(isolate);
 
   // Delegate to the underlying implementation.
@@ -921,7 +921,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
     IsCompiledScope* is_compiled_scope) {
   TimerEventScope<TimerEventCompileCode> top_level_timer(isolate);
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
 
   PostponeInterruptsScope postpone(isolate);
   DCHECK(!isolate->native_context().is_null());
@@ -1145,7 +1145,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
     return false;
 
   DCHECK(AllowCompilation::IsAllowed(isolate));
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
   DCHECK(!isolate->has_pending_exception());
   VMState<BYTECODE_COMPILER> state(isolate);
   PostponeInterruptsScope postpone(isolate);
@@ -1222,7 +1222,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
 
   Isolate* isolate = shared_info->GetIsolate();
   DCHECK(AllowCompilation::IsAllowed(isolate));
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
   DCHECK(!isolate->has_pending_exception());
   DCHECK(!shared_info->HasBytecodeArray());
   VMState<BYTECODE_COMPILER> state(isolate);
@@ -489,8 +489,7 @@ void RuntimeCallStats::CorrectCurrentCounterId(
 }
 
 bool RuntimeCallStats::IsCalledOnTheSameThread() {
-  if (!thread_id_.Equals(ThreadId::Invalid()))
-    return thread_id_.Equals(ThreadId::Current());
+  if (thread_id_.IsValid()) return thread_id_ == ThreadId::Current();
   thread_id_ = ThreadId::Current();
   return true;
 }
@@ -955,7 +955,7 @@ BytecodeGenerator::BytecodeGenerator(
 
 Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
     Isolate* isolate, Handle<Script> script) {
-  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
 #ifdef DEBUG
   // Unoptimized compilation should be context-independent. Verify that we don't
   // access the native context by nulling it out during finalization.
@@ -327,7 +327,8 @@ Isolate::PerIsolateThreadData*
 void Isolate::DiscardPerThreadDataForThisThread() {
   ThreadId thread_id = ThreadId::TryGetCurrent();
   if (thread_id.IsValid()) {
-    DCHECK(!thread_manager_->mutex_owner_.Equals(thread_id));
+    DCHECK_NE(thread_manager_->mutex_owner_.load(std::memory_order_relaxed),
+              thread_id);
     base::MutexGuard lock_guard(&thread_data_table_mutex_);
     PerIsolateThreadData* per_thread = thread_data_table_.Lookup(thread_id);
     if (per_thread) {
@@ -3498,8 +3499,8 @@ void Isolate::Enter() {
       DCHECK(Current() == this);
       DCHECK_NOT_NULL(entry_stack_);
       DCHECK(entry_stack_->previous_thread_data == nullptr ||
-             entry_stack_->previous_thread_data->thread_id().Equals(
-                 ThreadId::Current()));
+             entry_stack_->previous_thread_data->thread_id() ==
+                 ThreadId::Current());
       // Same thread re-enters the isolate, no need to re-init anything.
       entry_stack_->entry_count++;
       return;
@@ -3525,8 +3526,8 @@ void Isolate::Enter() {
 void Isolate::Exit() {
   DCHECK_NOT_NULL(entry_stack_);
   DCHECK(entry_stack_->previous_thread_data == nullptr ||
-         entry_stack_->previous_thread_data->thread_id().Equals(
-             ThreadId::Current()));
+         entry_stack_->previous_thread_data->thread_id() ==
+             ThreadId::Current());
 
   if (--entry_stack_->entry_count > 0) return;
 
@@ -448,7 +448,7 @@ class Isolate final : private HiddenFactory {
 #endif
 
   bool Matches(Isolate* isolate, ThreadId thread_id) const {
-    return isolate_ == isolate && thread_id_.Equals(thread_id);
+    return isolate_ == isolate && thread_id_ == thread_id;
   }
 
  private:
@@ -446,7 +446,7 @@ void Parser::DeserializeScopeChain(
   InitializeEmptyScopeChain(info);
   Handle<ScopeInfo> outer_scope_info;
   if (maybe_outer_scope_info.ToHandle(&outer_scope_info)) {
-    DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+    DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
     original_scope_ = Scope::DeserializeScopeChain(
         isolate, zone(), *outer_scope_info, info->script_scope(),
         ast_value_factory(), mode);
@@ -5,10 +5,6 @@
 #ifndef V8_THREAD_ID_H_
 #define V8_THREAD_ID_H_
 
-#include <atomic>
-
-#include "src/base/macros.h"
-
 namespace v8 {
 namespace internal {
 
@@ -16,16 +12,16 @@ namespace internal {
 class ThreadId {
  public:
   // Creates an invalid ThreadId.
-  ThreadId() : ThreadId(kInvalidId) {}
-
-  ThreadId(const ThreadId& other) V8_NOEXCEPT : ThreadId(other.ToInteger()) {}
-
-  ThreadId& operator=(const ThreadId& other) V8_NOEXCEPT {
-    id_.store(other.ToInteger(), std::memory_order_relaxed);
-    return *this;
-  }
-
-  bool operator==(const ThreadId& other) const { return Equals(other); }
+  constexpr ThreadId() noexcept : ThreadId(kInvalidId) {}
+
+  bool operator==(const ThreadId& other) const { return id_ == other.id_; }
+  bool operator!=(const ThreadId& other) const { return id_ != other.id_; }
+
+  // Checks whether this ThreadId refers to any thread.
+  bool IsValid() const { return id_ != kInvalidId; }
+
+  // Converts ThreadId to an integer representation.
+  constexpr int ToInteger() const { return id_; }
 
   // Returns ThreadId for current thread if it exists or invalid id.
   static ThreadId TryGetCurrent();
@@ -34,32 +30,20 @@ class ThreadId {
   static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
 
   // Returns invalid ThreadId (guaranteed not to be equal to any thread).
-  static ThreadId Invalid() { return ThreadId(kInvalidId); }
-
-  // Compares ThreadIds for equality.
-  V8_INLINE bool Equals(const ThreadId& other) const {
-    return ToInteger() == other.ToInteger();
-  }
-
-  // Checks whether this ThreadId refers to any thread.
-  V8_INLINE bool IsValid() const { return ToInteger() != kInvalidId; }
-
-  // Converts ThreadId to an integer representation
-  // (required for public API: V8::V8::GetCurrentThreadId).
-  int ToInteger() const { return id_.load(std::memory_order_relaxed); }
+  static constexpr ThreadId Invalid() { return ThreadId(kInvalidId); }
 
   // Converts ThreadId to an integer representation
   // (required for public API: V8::V8::TerminateExecution).
-  static ThreadId FromInteger(int id) { return ThreadId(id); }
+  static constexpr ThreadId FromInteger(int id) { return ThreadId(id); }
 
  private:
   static constexpr int kInvalidId = -1;
 
-  explicit ThreadId(int id) { id_.store(id, std::memory_order_relaxed); }
+  explicit constexpr ThreadId(int id) noexcept : id_(id) {}
 
   static int GetCurrentThreadId();
 
-  std::atomic<int> id_;
+  int id_;
 };
 
 }  // namespace internal
@@ -105,7 +105,7 @@ bool ThreadManager::RestoreThread() {
   // First check whether the current thread has been 'lazily archived', i.e.
   // not archived at all. If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
-  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
+  if (lazily_archived_thread_ == ThreadId::Current()) {
     lazily_archived_thread_ = ThreadId::Invalid();
     Isolate::PerIsolateThreadData* per_thread =
         isolate_->FindPerThreadDataForThisThread();
@@ -157,13 +157,13 @@ bool ThreadManager::RestoreThread() {
 
 void ThreadManager::Lock() {
   mutex_.Lock();
-  mutex_owner_ = ThreadId::Current();
+  mutex_owner_.store(ThreadId::Current(), std::memory_order_relaxed);
   DCHECK(IsLockedByCurrentThread());
 }
 
 
 void ThreadManager::Unlock() {
-  mutex_owner_ = ThreadId::Invalid();
+  mutex_owner_.store(ThreadId::Invalid(), std::memory_order_relaxed);
   mutex_.Unlock();
 }
 
@@ -268,7 +268,7 @@ void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
 
 
 void ThreadManager::ArchiveThread() {
-  DCHECK(lazily_archived_thread_.Equals(ThreadId::Invalid()));
+  DCHECK_EQ(lazily_archived_thread_, ThreadId::Invalid());
   DCHECK(!IsArchived());
   DCHECK(IsLockedByCurrentThread());
   ThreadState* state = GetFreeThreadState();
@@ -278,9 +278,9 @@ void ThreadManager::ArchiveThread() {
   per_thread->set_thread_state(state);
   lazily_archived_thread_ = ThreadId::Current();
   lazily_archived_thread_state_ = state;
-  DCHECK(state->id().Equals(ThreadId::Invalid()));
+  DCHECK_EQ(state->id(), ThreadId::Invalid());
   state->set_id(CurrentId());
-  DCHECK(!state->id().Equals(ThreadId::Invalid()));
+  DCHECK_NE(state->id(), ThreadId::Invalid());
 }
 
 
@@ -352,7 +352,7 @@ ThreadId ThreadManager::CurrentId() {
 void ThreadManager::TerminateExecution(ThreadId thread_id) {
   for (ThreadState* state = FirstThreadStateInUse(); state != nullptr;
        state = state->Next()) {
-    if (thread_id.Equals(state->id())) {
+    if (thread_id == state->id()) {
       state->set_terminate_on_restore(true);
     }
   }
@@ -5,6 +5,8 @@
 #ifndef V8_V8THREADS_H_
 #define V8_V8THREADS_H_
 
+#include <atomic>
+
 #include "src/isolate.h"
 
 namespace v8 {
@@ -75,8 +77,8 @@ class ThreadManager {
 
   void Iterate(RootVisitor* v);
   void IterateArchivedThreads(ThreadVisitor* v);
-  bool IsLockedByCurrentThread() {
-    return mutex_owner_.Equals(ThreadId::Current());
+  bool IsLockedByCurrentThread() const {
+    return mutex_owner_.load(std::memory_order_relaxed) == ThreadId::Current();
   }
 
   ThreadId CurrentId();
@@ -96,7 +98,9 @@ class ThreadManager {
   void EagerlyArchiveThread();
 
   base::Mutex mutex_;
-  ThreadId mutex_owner_;
+  // {ThreadId} must be trivially copyable to be stored in {std::atomic}.
+  ASSERT_TRIVIALLY_COPYABLE(i::ThreadId);
+  std::atomic<ThreadId> mutex_owner_;
   ThreadId lazily_archived_thread_;
   ThreadState* lazily_archived_thread_state_;
 
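The ASSERT_TRIVIALLY_COPYABLE(i::ThreadId) lines in the hunks above document the requirement that std::atomic imposes on its payload. A standalone sketch of the same pattern using only the standard library (PlainId and mutex_owner are illustrative stand-ins, not V8 names):

#include <atomic>
#include <type_traits>

struct PlainId { int value; };  // stand-in for the new, non-atomic ThreadId

// std::atomic<T> is only well-formed for trivially copyable T; this mirrors
// the ASSERT_TRIVIALLY_COPYABLE(i::ThreadId) assertion in the diff above.
static_assert(std::is_trivially_copyable<PlainId>::value,
              "std::atomic<T> requires a trivially copyable T");

// The owning field becomes the atomic, not the id type itself; reads and
// writes then go through explicit load/store, as ThreadManager does above.
std::atomic<PlainId> mutex_owner{PlainId{-1}};

bool OwnedBy(PlainId me) {
  return mutex_owner.load(std::memory_order_relaxed).value == me.value;
}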
@@ -25,18 +25,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
 #include "test/cctest/cctest.h"
 
 #include "src/base/platform/platform.h"
 #include "src/isolate.h"
+#include "src/thread-id.h"
 
-class ThreadIdValidationThread : public v8::base::Thread {
+namespace v8 {
+namespace internal {
+
+// {ThreadId} must be trivially copyable to be stored in {std::atomic}.
+ASSERT_TRIVIALLY_COPYABLE(i::ThreadId);
+using AtomicThreadId = std::atomic<i::ThreadId>;
+
+class ThreadIdValidationThread : public base::Thread {
  public:
-  ThreadIdValidationThread(v8::base::Thread* thread_to_start,
-                           std::vector<i::ThreadId>* refs,
-                           unsigned int thread_no,
-                           v8::base::Semaphore* semaphore)
+  ThreadIdValidationThread(base::Thread* thread_to_start, AtomicThreadId* refs,
+                           unsigned int thread_no, base::Semaphore* semaphore)
       : Thread(Options("ThreadRefValidationThread")),
         refs_(refs),
         thread_no_(thread_no),
@@ -45,11 +48,11 @@ class ThreadIdValidationThread : public v8::base::Thread {
 
   void Run() override {
     i::ThreadId thread_id = i::ThreadId::Current();
+    CHECK(thread_id.IsValid());
     for (int i = 0; i < thread_no_; i++) {
-      CHECK(!(*refs_)[i].Equals(thread_id));
+      CHECK_NE(refs_[i].load(std::memory_order_relaxed), thread_id);
     }
-    (*refs_)[thread_no_] = thread_id;
+    refs_[thread_no_].store(thread_id, std::memory_order_relaxed);
     if (thread_to_start_ != nullptr) {
       thread_to_start_->Start();
     }
@@ -57,33 +60,28 @@ class ThreadIdValidationThread : public v8::base::Thread {
   }
 
  private:
-  std::vector<i::ThreadId>* refs_;
-  int thread_no_;
-  v8::base::Thread* thread_to_start_;
-  v8::base::Semaphore* semaphore_;
+  AtomicThreadId* const refs_;
+  const int thread_no_;
+  base::Thread* const thread_to_start_;
+  base::Semaphore* const semaphore_;
 };
 
 
 TEST(ThreadIdValidation) {
-  const int kNThreads = 100;
-  std::vector<ThreadIdValidationThread*> threads;
-  std::vector<i::ThreadId> refs;
-  threads.reserve(kNThreads);
-  refs.reserve(kNThreads);
-  v8::base::Semaphore semaphore(0);
-  ThreadIdValidationThread* prev = nullptr;
+  constexpr int kNThreads = 100;
+  std::unique_ptr<ThreadIdValidationThread> threads[kNThreads];
+  AtomicThreadId refs[kNThreads];
+  base::Semaphore semaphore(0);
   for (int i = kNThreads - 1; i >= 0; i--) {
-    ThreadIdValidationThread* newThread =
-        new ThreadIdValidationThread(prev, &refs, i, &semaphore);
-    threads.push_back(newThread);
-    prev = newThread;
-    refs.push_back(i::ThreadId::Invalid());
+    ThreadIdValidationThread* prev =
+        i == kNThreads - 1 ? nullptr : threads[i + 1].get();
+    threads[i] =
+        base::make_unique<ThreadIdValidationThread>(prev, refs, i, &semaphore);
   }
-  prev->Start();
+  threads[0]->Start();
  for (int i = 0; i < kNThreads; i++) {
    semaphore.Wait();
  }
-  for (int i = 0; i < kNThreads; i++) {
-    delete threads[i];
-  }
 }
+
+}  // namespace internal
+}  // namespace v8