// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <memory>

#include "src/api/api.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/assembler.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/parked-scope.h"
#include "src/heap/safepoint.h"
#include "src/objects/heap-number.h"
#include "src/objects/heap-object.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {

namespace {
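// Turns the raw memory at |start| into a FixedArray of |size| bytes, filled
// with undefined values, so the freshly allocated region is a valid,
// iterable heap object.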
void CreateFixedArray(Heap* heap, Address start, int size) {
  HeapObject object = HeapObject::FromAddress(start);
  object.set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
                                  SKIP_WRITE_BARRIER);
  FixedArray array = FixedArray::cast(object);
  int length = (size - FixedArray::kHeaderSize) / kTaggedSize;
  array.set_length(length);
  MemsetTagged(array.data_start(), ReadOnlyRoots(heap).undefined_value(),
               length);
}

const int kNumIterations = 2000;
const int kSmallObjectSize = 10 * kTaggedSize;
const int kMediumObjectSize = 8 * KB;

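// Allocates small and medium old-space objects from |local_heap|, entering a
// safepoint every ten iterations so a concurrent GC can interrupt the thread.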
void AllocateSomeObjects(LocalHeap* local_heap) {
  for (int i = 0; i < kNumIterations; i++) {
    Address address = local_heap->AllocateRawOrFail(
        kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
        AllocationAlignment::kWordAligned);
    CreateFixedArray(local_heap->heap(), address, kSmallObjectSize);
    address = local_heap->AllocateRawOrFail(
        kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
        AllocationAlignment::kWordAligned);
    CreateFixedArray(local_heap->heap(), address, kMediumObjectSize);
    if (i % 10 == 0) {
      local_heap->Safepoint();
    }
  }
}
}  // namespace

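// A background thread that owns a LocalHeap and hammers the old space with
// allocations; it decrements |pending| (if given) when done.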
class ConcurrentAllocationThread final : public v8::base::Thread {
 public:
  explicit ConcurrentAllocationThread(Heap* heap,
                                      std::atomic<int>* pending = nullptr)
      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
        heap_(heap),
        pending_(pending) {}

  void Run() override {
    LocalHeap local_heap(heap_, ThreadKind::kBackground);
    UnparkedScope unparked_scope(&local_heap);
    AllocateSomeObjects(&local_heap);
    if (pending_) pending_->fetch_sub(1);
  }

  Heap* heap_;
  std::atomic<int>* pending_;
};

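// Runs several allocating background threads against a small old space while
// the main thread pumps the message loop until all of them finish.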
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
  FLAG_max_old_space_size = 32;
  FLAG_stress_concurrent_allocation = false;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;

  const int kThreads = 4;

  std::atomic<int> pending(kThreads);

  for (int i = 0; i < kThreads; i++) {
    auto thread = std::make_unique<ConcurrentAllocationThread>(
        i_isolate->heap(), &pending);
    CHECK(thread->Start());
    threads.push_back(std::move(thread));
  }

  while (pending > 0) {
    v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(), isolate);
  }

  for (auto& thread : threads) {
    thread->Join();
  }

  isolate->Dispose();
}

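// Exercises the same LocalHeap allocation path, but from the main thread's
// own LocalHeap.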
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
  FLAG_max_old_space_size = 4;
  FLAG_stress_concurrent_allocation = false;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  AllocateSomeObjects(i_isolate->main_thread_local_heap());

  isolate->Dispose();
}

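// Background threads allocate while the main thread stays parked the whole
// time, so any GC has to be coordinated without the main thread's help.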
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadIsParked) {
  FLAG_max_old_space_size = 4;
  FLAG_stress_concurrent_allocation = false;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
  const int kThreads = 4;

  {
    ParkedScope scope(i_isolate->main_thread_local_isolate());

    for (int i = 0; i < kThreads; i++) {
      auto thread =
          std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
      CHECK(thread->Start());
      threads.push_back(std::move(thread));
    }

    for (auto& thread : threads) {
      thread->Join();
    }
  }

  isolate->Dispose();
}

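// Stresses the park/unpark transition of the main thread while background
// threads keep allocating.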
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadParksAndUnparks) {
  FLAG_max_old_space_size = 4;
  FLAG_stress_concurrent_allocation = false;
  FLAG_incremental_marking = false;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
  const int kThreads = 4;

  for (int i = 0; i < kThreads; i++) {
    auto thread =
        std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
    CHECK(thread->Start());
    threads.push_back(std::move(thread));
  }

  for (int i = 0; i < 300'000; i++) {
    ParkedScope scope(i_isolate->main_thread_local_isolate());
  }

  {
    ParkedScope scope(i_isolate->main_thread_local_isolate());

    for (auto& thread : threads) {
      thread->Join();
    }
  }

  isolate->Dispose();
}

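// The main thread keeps entering safepoints instead of parking while
// background threads allocate.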
UNINITIALIZED_TEST(ConcurrentAllocationWhileMainThreadRunsWithSafepoints) {
  FLAG_max_old_space_size = 4;
  FLAG_stress_concurrent_allocation = false;
  FLAG_incremental_marking = false;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
  const int kThreads = 4;

  for (int i = 0; i < kThreads; i++) {
    auto thread =
        std::make_unique<ConcurrentAllocationThread>(i_isolate->heap());
    CHECK(thread->Start());
    threads.push_back(std::move(thread));
  }

  // Some of the following Safepoint() invocations are supposed to perform a
  // GC.
  for (int i = 0; i < 1'000'000; i++) {
    i_isolate->main_thread_local_heap()->Safepoint();
  }

  {
    ParkedScope scope(i_isolate->main_thread_local_isolate());

    for (auto& thread : threads) {
      thread->Join();
    }
  }

  i_isolate->main_thread_local_heap()->Safepoint();
  isolate->Dispose();
}

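// Like ConcurrentAllocationThread, but allocates objects above
// kMaxRegularHeapObjectSize and triggers a collection before retrying when an
// allocation fails.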
class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
 public:
  explicit LargeObjectConcurrentAllocationThread(Heap* heap,
                                                 std::atomic<int>* pending)
      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
        heap_(heap),
        pending_(pending) {}

  void Run() override {
    LocalHeap local_heap(heap_, ThreadKind::kBackground);
    UnparkedScope unparked_scope(&local_heap);
    const size_t kLargeObjectSize = kMaxRegularHeapObjectSize * 2;

    for (int i = 0; i < kNumIterations; i++) {
      AllocationResult result = local_heap.AllocateRaw(
          kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
          AllocationAlignment::kWordAligned);
      if (result.IsRetry()) {
        local_heap.TryPerformCollection();
      } else {
        Address address = result.ToAddress();
        CreateFixedArray(heap_, address, kLargeObjectSize);
      }
      local_heap.Safepoint();
    }

    pending_->fetch_sub(1);
  }

  Heap* heap_;
  std::atomic<int>* pending_;
};

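// Same setup as ConcurrentAllocationInOldSpace, but the threads allocate in
// the large object space.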
UNINITIALIZED_TEST(ConcurrentAllocationInLargeSpace) {
  FLAG_max_old_space_size = 32;
  FLAG_stress_concurrent_allocation = false;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

  std::vector<std::unique_ptr<LargeObjectConcurrentAllocationThread>> threads;

  const int kThreads = 4;

  std::atomic<int> pending(kThreads);

  for (int i = 0; i < kThreads; i++) {
    auto thread = std::make_unique<LargeObjectConcurrentAllocationThread>(
        i_isolate->heap(), &pending);
    CHECK(thread->Start());
    threads.push_back(std::move(thread));
  }

  while (pending > 0) {
    v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(), isolate);
  }

  for (auto& thread : threads) {
    thread->Join();
  }

  isolate->Dispose();
}

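// Number of iterations the allocation thread performs before incremental
// marking starts; objects from these iterations are expected to stay white.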
const int kWhiteIterations = 1000;

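// Allocates objects from a background thread, signalling the main thread via
// |sema_white| after kWhiteIterations iterations and waiting on
// |sema_marking_started| before continuing to allocate under marking.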
class ConcurrentBlackAllocationThread final : public v8::base::Thread {
 public:
  explicit ConcurrentBlackAllocationThread(
      Heap* heap, std::vector<Address>* objects, base::Semaphore* sema_white,
      base::Semaphore* sema_marking_started)
      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
        heap_(heap),
        objects_(objects),
        sema_white_(sema_white),
        sema_marking_started_(sema_marking_started) {}

  void Run() override {
    LocalHeap local_heap(heap_, ThreadKind::kBackground);
    UnparkedScope unparked_scope(&local_heap);

    for (int i = 0; i < kNumIterations; i++) {
      if (i == kWhiteIterations) {
        ParkedScope scope(&local_heap);
        sema_white_->Signal();
        sema_marking_started_->Wait();
      }
      Address address = local_heap.AllocateRawOrFail(
          kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
          AllocationAlignment::kWordAligned);
      objects_->push_back(address);
      CreateFixedArray(heap_, address, kSmallObjectSize);
      address = local_heap.AllocateRawOrFail(
          kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
          AllocationAlignment::kWordAligned);
      objects_->push_back(address);
      CreateFixedArray(heap_, address, kMediumObjectSize);
    }
  }

  Heap* heap_;
  std::vector<Address>* objects_;
  base::Semaphore* sema_white_;
  base::Semaphore* sema_marking_started_;
};

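// Checks black allocation: objects allocated before incremental marking
// started must still be white, objects allocated while marking is active must
// be allocated black.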
UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
  if (!FLAG_incremental_marking) return;
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  Heap* heap = i_isolate->heap();

  std::vector<Address> objects;

  base::Semaphore sema_white(0);
  base::Semaphore sema_marking_started(0);

  auto thread = std::make_unique<ConcurrentBlackAllocationThread>(
      heap, &objects, &sema_white, &sema_marking_started);
  CHECK(thread->Start());

  sema_white.Wait();
  heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                i::GarbageCollectionReason::kTesting);
  sema_marking_started.Signal();

  thread->Join();

  const int kObjectsAllocatedPerIteration = 2;

  for (int i = 0; i < kNumIterations * kObjectsAllocatedPerIteration; i++) {
    Address address = objects[i];
    HeapObject object = HeapObject::FromAddress(address);

    if (i < kWhiteIterations * kObjectsAllocatedPerIteration) {
      CHECK(heap->incremental_marking()->marking_state()->IsWhite(object));
    } else {
      CHECK(heap->incremental_marking()->marking_state()->IsBlack(object));
    }
  }

  isolate->Dispose();
}

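// Performs a single write into a FixedArray from a background thread, which
// must trigger the concurrent marking write barrier for |value|.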
class ConcurrentWriteBarrierThread final : public v8::base::Thread {
 public:
  explicit ConcurrentWriteBarrierThread(Heap* heap, FixedArray fixed_array,
                                        HeapObject value)
      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
        heap_(heap),
        fixed_array_(fixed_array),
        value_(value) {}

  void Run() override {
    LocalHeap local_heap(heap_, ThreadKind::kBackground);
    UnparkedScope unparked_scope(&local_heap);
    fixed_array_.set(0, value_);
  }

  Heap* heap_;
  FixedArray fixed_array_;
  HeapObject value_;
};

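// A write from a background thread must mark the written value: |value|
// starts out white and has to be black or grey after the write.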
UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
  if (!FLAG_incremental_marking) return;
  if (!FLAG_concurrent_marking) {
    // The test requires concurrent marking barrier.
    return;
  }
  ManualGCScope manual_gc_scope;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  Heap* heap = i_isolate->heap();

  FixedArray fixed_array;
  HeapObject value;
  {
    HandleScope handle_scope(i_isolate);
    Handle<FixedArray> fixed_array_handle(
        i_isolate->factory()->NewFixedArray(1));
    Handle<HeapNumber> value_handle(
        i_isolate->factory()->NewHeapNumber<AllocationType::kOld>(1.1));
    fixed_array = *fixed_array_handle;
    value = *value_handle;
  }
  heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                i::GarbageCollectionReason::kTesting);
  CHECK(heap->incremental_marking()->marking_state()->IsWhite(value));

  auto thread =
      std::make_unique<ConcurrentWriteBarrierThread>(heap, fixed_array, value);
  CHECK(thread->Start());

  thread->Join();

  CHECK(heap->incremental_marking()->marking_state()->IsBlackOrGrey(value));
  heap::InvokeMarkSweep(i_isolate);

  isolate->Dispose();
}

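// Rewrites the embedded object slots of |code| from a background thread,
// which has to record the relocation slots for the concurrent marker.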
class ConcurrentRecordRelocSlotThread final : public v8::base::Thread {
 public:
  explicit ConcurrentRecordRelocSlotThread(Heap* heap, Code code,
                                           HeapObject value)
      : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
        heap_(heap),
        code_(code),
        value_(value) {}

  void Run() override {
    LocalHeap local_heap(heap_, ThreadKind::kBackground);
    UnparkedScope unparked_scope(&local_heap);
    int mode_mask = RelocInfo::EmbeddedObjectModeMask();
    for (RelocIterator it(code_, mode_mask); !it.done(); it.next()) {
      DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
      it.rinfo()->set_target_object(heap_, value_);
    }
  }

  Heap* heap_;
  Code code_;
  HeapObject value_;
};

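// Builds a Code object with an embedded object slot, makes |value| an
// evacuation candidate, and then patches the slot from a background thread
// while incremental marking is running.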
UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
  if (!FLAG_incremental_marking) return;
  if (!FLAG_concurrent_marking) {
    // The test requires concurrent marking barrier.
    return;
  }
  FLAG_manual_evacuation_candidates_selection = true;
  ManualGCScope manual_gc_scope;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
  Heap* heap = i_isolate->heap();

  Code code;
  HeapObject value;
  {
    HandleScope handle_scope(i_isolate);
    i::byte buffer[i::Assembler::kDefaultBufferSize];
    MacroAssembler masm(i_isolate, v8::internal::CodeObjectRequired::kYes,
                        ExternalAssemblerBuffer(buffer, sizeof(buffer)));
#if V8_TARGET_ARCH_ARM64
    // Arm64 requires stack alignment.
    UseScratchRegisterScope temps(&masm);
    Register tmp = temps.AcquireX();
    masm.Mov(tmp, Operand(ReadOnlyRoots(heap).undefined_value_handle()));
    masm.Push(tmp, padreg);
#else
    masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
#endif
    CodeDesc desc;
    masm.GetCode(i_isolate, &desc);
    Handle<Code> code_handle =
        Factory::CodeBuilder(i_isolate, desc, CodeKind::FOR_TESTING).Build();
    heap::AbandonCurrentlyFreeMemory(heap->old_space());
    Handle<HeapNumber> value_handle(
        i_isolate->factory()->NewHeapNumber<AllocationType::kOld>(1.1));
    heap::ForceEvacuationCandidate(Page::FromHeapObject(*value_handle));
    code = *code_handle;
    value = *value_handle;
  }
  heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                i::GarbageCollectionReason::kTesting);
  CHECK(heap->incremental_marking()->marking_state()->IsWhite(value));

  {
    CodeSpaceMemoryModificationScope modification_scope(heap);
    auto thread =
        std::make_unique<ConcurrentRecordRelocSlotThread>(heap, code, value);
    CHECK(thread->Start());

    thread->Join();
  }

  CHECK(heap->incremental_marking()->marking_state()->IsBlackOrGrey(value));
  heap::InvokeMarkSweep(i_isolate);

  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8