v8/test/unittests/test-utils.cc
Shu-yu Guo 7ff1857560 Reland "[shared-struct] Add Atomics.Mutex"
This is a reland of commit ea9a1f1cbe

Changes since revert:
- Make the state field uintptr-aligned, since arm64 faults on
  atomic accesses to non-naturally aligned addresses (illustrated
  by the sketch just before the source listing below).

Original change's description:
> [shared-struct] Add Atomics.Mutex
>
> This CL adds a moving GC-safe, JS-exposed mutex behind the
> --harmony-struct flag. It uses a ParkingLot-inspired algorithm and
> each mutex manages its own waiter queue.
>
> For more details, please see the design doc: https://docs.google.com/document/d/1QHkmiTF770GKxtoP-VQ1eKF42MpedLUeqiQPfCqus0Y/edit?usp=sharing
>
> Bug: v8:12547
> Change-Id: Ic58f8750d2e14ecd573173d17d5235a136bedef9
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3595460
> Commit-Queue: Shu-yu Guo <syg@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Adam Klein <adamk@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80789}

Bug: v8:12547
Change-Id: I776cbf6ea860dcc6cb0ac51694a9b584b53d255c
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_mac_arm64_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3673354
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Adam Klein <adamk@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80875}
2022-06-01 01:12:07 +00:00
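
For orientation only, here is a minimal, hypothetical C++ sketch of the two points above: a ParkingLot-style lock whose fast path is a single CAS on a uintptr-sized, naturally aligned state word, so the atomic access can never fault on arm64. It is not V8's JSAtomicsMutex (the actual feature is the JS-exposed Atomics.Mutex behind --harmony-struct, with a per-mutex waiter queue as described in the linked design doc); every name in the sketch is invented for illustration.

// Illustrative sketch only: not V8 code. The lock state is a single
// uintptr_t-sized, naturally aligned word, so the CAS below is always an
// aligned atomic access (arm64 faults on misaligned atomics). The slow path
// (parking the thread on a per-mutex waiter queue) is deliberately omitted.
#include <atomic>
#include <cstdint>

class ToyParkingLotMutex {
 public:
  bool TryLock() {
    uintptr_t expected = kUnlocked;
    // Fast path: one CAS on the aligned state word.
    return state_.compare_exchange_strong(expected, kLockedNoWaiters,
                                          std::memory_order_acquire);
  }

  void Unlock() {
    // A real implementation would also wake a parked waiter here.
    state_.store(kUnlocked, std::memory_order_release);
  }

 private:
  static constexpr uintptr_t kUnlocked = 0;
  static constexpr uintptr_t kLockedNoWaiters = 1;

  // In V8 the state lives inside a heap object, so its field offset had to be
  // made explicitly uintptr-aligned; for a plain std::atomic member, natural
  // alignment is already guaranteed.
  alignas(alignof(uintptr_t)) std::atomic<uintptr_t> state_{kUnlocked};
};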

// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/unittests/test-utils.h"
#include "include/libplatform/libplatform.h"
#include "include/v8-isolate.h"
#include "src/api/api-inl.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace {
// counter_lookup_callback doesn't pass through any state information about
// the current Isolate, so we have to store the current counter map somewhere.
// Fortunately tests run serially, so we can just store it in a static global.
CounterMap* kCurrentCounterMap = nullptr;
} // namespace
IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
IsolateSharedMode shared_mode,
v8::Isolate* shared_isolate_if_client)
: array_buffer_allocator_(
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
CHECK_NULL(kCurrentCounterMap);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = array_buffer_allocator_.get();
if (counters_mode == kEnableCounters) {
counter_map_ = std::make_unique<CounterMap>();
kCurrentCounterMap = counter_map_.get();
create_params.counter_lookup_callback = [](const char* name) {
CHECK_NOT_NULL(kCurrentCounterMap);
// If the name doesn't exist in the counter map, operator[] will default
// initialize it to zero.
return &(*kCurrentCounterMap)[name];
};
} else {
create_params.counter_lookup_callback = [](const char* name) -> int* {
return nullptr;
};
}
if (shared_mode == kSharedIsolate) {
isolate_ = reinterpret_cast<v8::Isolate*>(
internal::Isolate::NewShared(create_params));
} else {
if (shared_mode == kClientIsolate) {
CHECK_NOT_NULL(shared_isolate_if_client);
create_params.experimental_attach_to_shared_isolate =
shared_isolate_if_client;
}
isolate_ = v8::Isolate::New(create_params);
}
CHECK_NOT_NULL(isolate());
}
IsolateWrapper::~IsolateWrapper() {
v8::Platform* platform = internal::V8::GetCurrentPlatform();
CHECK_NOT_NULL(platform);
while (platform::PumpMessageLoop(platform, isolate())) continue;
isolate_->Dispose();
if (counter_map_) {
CHECK_EQ(kCurrentCounterMap, counter_map_.get());
kCurrentCounterMap = nullptr;
} else {
CHECK_NULL(kCurrentCounterMap);
}
}

namespace internal {

SaveFlags::SaveFlags() {
  // For each flag, save the current flag value.
#define FLAG_MODE_APPLY(ftype, ctype, nam, def, cmt) SAVED_##nam = FLAG_##nam;
#include "src/flags/flag-definitions.h"
#undef FLAG_MODE_APPLY
}

SaveFlags::~SaveFlags() {
  // For each flag, set back the old flag value if it changed (don't write the
  // flag if it didn't change, to keep TSAN happy).
#define FLAG_MODE_APPLY(ftype, ctype, nam, def, cmt) \
  if (SAVED_##nam != FLAG_##nam) {                   \
    FLAG_##nam = SAVED_##nam;                        \
  }
#include "src/flags/flag-definitions.h"  // NOLINT
#undef FLAG_MODE_APPLY
}

ManualGCScope::ManualGCScope(i::Isolate* isolate) {
  // Some tests run threaded (back-to-back) and thus the GC may already be
  // running by the time a ManualGCScope is created. Finalizing existing marking
  // prevents any undefined/unexpected behavior.
  if (isolate && isolate->heap()->incremental_marking()->IsMarking()) {
    isolate->heap()->CollectGarbage(i::OLD_SPACE,
                                    i::GarbageCollectionReason::kTesting);
  }

  i::FLAG_concurrent_marking = false;
  i::FLAG_concurrent_sweeping = false;
  i::FLAG_stress_incremental_marking = false;
  i::FLAG_stress_concurrent_allocation = false;
  // Parallel marking has a dependency on concurrent marking.
  i::FLAG_parallel_marking = false;
  i::FLAG_detect_ineffective_gcs_near_heap_limit = false;
}

}  // namespace internal
} // namespace v8
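
For context, a short usage sketch of the SaveFlags and ManualGCScope helpers defined above. The function and its body are hypothetical and exist only to show the intended RAII pattern; only SaveFlags, ManualGCScope, FLAG_concurrent_marking, and the includes come from this file.

// Hypothetical usage sketch (not part of this change). SaveFlags snapshots
// every flag on construction and restores any flag that changed when it goes
// out of scope, so a test can tweak flags without leaking state into the next
// test. ManualGCScope additionally switches off concurrent/stress GC flags.
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "test/unittests/test-utils.h"

namespace v8 {
namespace internal {

void RunBodyWithDeterministicGC(Isolate* isolate) {
  SaveFlags save_flags;                    // snapshot all flag values
  ManualGCScope manual_gc_scope(isolate);  // disable concurrent/stress GC
  FLAG_concurrent_marking = false;         // example of a per-test flag tweak
  // ... test body that relies on deterministic GC behavior ...
}  // ~SaveFlags() restores any flags that were changed in this scope

}  // namespace internal
}  // namespace v8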