Revert "v8::ItemParallelJob : Do not launch more Tasks than there are Items to process."
This reverts commit 8a27c7d396.
Reason for revert:
Having more tasks than work items is intentional in some use cases, e.g. Scavenging, where RunInParallel() does parallel processing on a dynamic workload *after* draining the initial set of work items:
{
  barrier_->Start();
  TimedScope scope(&scavenging_time);
  // Drain the initial set of work items handed to this task.
  PageScavengingItem* item = nullptr;
  while ((item = GetItem<PageScavengingItem>()) != nullptr) {
    item->Process(scavenger_);
    item->MarkFinished();
  }
  // Keep processing dynamically discovered work until all tasks have
  // reached the barrier.
  do {
    scavenger_->Process(barrier_);
  } while (!barrier_->Wait());
  scavenger_->Process();
}
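The same two-phase pattern can be modeled outside of V8. The sketch below is a standalone C++ analogy, not the ItemParallelJob API (names such as next_item and dynamic_work are illustrative): workers first drain a shared list of initial items and then keep helping with work that is only discovered while the job runs, so a worker that found zero initial items still contributes.

// Standalone sketch, not V8 code: a shared pool of initial items plus a
// dynamically discovered workload, processed by more workers than items.
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  std::vector<int> items = {1, 2};        // fewer initial items than workers
  std::atomic<std::size_t> next_item{0};  // index of the next unclaimed item
  std::atomic<int> dynamic_work{100};     // work only discovered while running
  std::atomic<int> processed{0};

  auto worker = [&] {
    // Phase 1: drain the initial items (a worker may find none).
    std::size_t i;
    while ((i = next_item.fetch_add(1)) < items.size()) {
      processed.fetch_add(items[i]);
    }
    // Phase 2: help with the dynamic workload regardless of phase 1.
    while (dynamic_work.fetch_sub(1) > 0) {
      processed.fetch_add(1);
    }
  };

  std::vector<std::thread> workers;
  for (int t = 0; t < 4; t++) workers.emplace_back(worker);
  for (auto& w : workers) w.join();
  std::printf("total units processed: %d\n", processed.load());
}

Capping the number of workers at the number of initial items would drop exactly the helpers the second phase relies on.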
Original change's description:
> v8::ItemParallelJob : Do not launch more Tasks than there are Items to process.
>
> Except when there are 0 items. For some reason I don't quite understand yet, not
> calling Run() on tasks_[0] when there are 0 items results in DCHECKs...
>
> Bug: chromium:806237
> Change-Id: I38c8fffde64a42f93f4efda492832651137eebd7
> Reviewed-on: https://chromium-review.googlesource.com/888704
> Commit-Queue: Gabriel Charette <gab@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#50924}
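For context, the capping rule the reverted change introduced, Max(size_t(1), Min(num_items, tasks_.size())), always keeps at least the main thread's task alive even with zero items. A standalone sketch of how it evaluates, with std::min/std::max standing in for V8's Min/Max helpers (not V8 code):

// Standalone sketch, not V8 code: how the reverted capping rule evaluates.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t cases[][2] = {{0, 4}, {2, 4}, {10, 4}};  // {num_items, num_tasks}
  for (const auto& c : cases) {
    const std::size_t capped = std::max<std::size_t>(1, std::min(c[0], c[1]));
    std::printf("items=%zu tasks=%zu -> tasks kept: %zu\n", c[0], c[1], capped);
  }
}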
TBR=gab@chromium.org,mlippautz@chromium.org
Change-Id: Iad2ab16bb41f339de8e3fbca1c08c5d26b8a0111
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:806237
Reviewed-on: https://chromium-review.googlesource.com/891186
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Gabriel Charette <gab@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50928}
parent dfd224c4ea
commit 2407b2bd1b
src/heap/item-parallel-job.h
@@ -7,10 +7,8 @@
 
 #include <vector>
 
-#include "src/base/macros.h"
 #include "src/base/platform/semaphore.h"
 #include "src/cancelable-task.h"
-#include "src/utils.h"
 #include "src/v8.h"
 
 namespace v8 {
@@ -134,32 +132,17 @@ class ItemParallelJob {
   int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
 
   void Run() {
-    DCHECK_GT(tasks_.size(), 0);
+    DCHECK_GE(tasks_.size(), 0);
+    const size_t num_tasks = tasks_.size();
     const size_t num_items = items_.size();
-
-    // TODO(gab): Make it impossible to have more |tasks_| than |items_| in the
-    // first place.
-    // TODO(gab): Figure out why |num_tasks == 0| when |num_items == 0| breaks a
-    // lot of stuff, keep running at least the main thread's task until then.
-    const size_t num_tasks = Max(size_t(1), Min(num_items, tasks_.size()));
-
-    for (size_t i = num_tasks; i < tasks_.size(); i++) {
-      // Abort unused tasks right away.
-      Task* aborted_task = tasks_[i];
-      auto aborted = cancelable_task_manager_->TryAbort(aborted_task->id());
-      USE(aborted);
-      DCHECK_EQ(CancelableTaskManager::kTaskAborted, aborted);
-      // CallOnBackgroundThread() usually handles the deletion.
-      delete aborted_task;
-    }
-
     const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
     CancelableTaskManager::Id* task_ids =
         new CancelableTaskManager::Id[num_tasks];
+    size_t start_index = 0;
     Task* main_task = nullptr;
-    for (size_t i = 0, start_index = 0; i < num_tasks;
-         i++, start_index += items_per_task) {
-      Task* task = tasks_[i];
+    Task* task = nullptr;
+    for (size_t i = 0; i < num_tasks; i++, start_index += items_per_task) {
+      task = tasks_[i];
       if (start_index >= num_items) {
         start_index -= num_items;
       }
@@ -172,11 +155,9 @@ class ItemParallelJob {
         main_task = task;
       }
     }
 
     // Contribute on main thread.
     main_task->Run();
     delete main_task;
 
     // Wait for background tasks.
     for (size_t i = 0; i < num_tasks; i++) {
       if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
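To make the restored distribution logic concrete: below is a standalone sketch (not V8 code) of the ceil-division chunking and wrapped start offsets from the hunk above, under the assumption that tasks claim individual items atomically so overlapping scan ranges are harmless.

// Standalone sketch, not V8 code: ceil-division chunking plus wrapped start
// offsets, as in the restored Run(). With 2 items and 4 tasks the extra tasks
// wrap around and start scanning at offsets other tasks already cover.
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t num_items = 2;
  const std::size_t num_tasks = 4;
  const std::size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;  // 1
  std::size_t start_index = 0;
  for (std::size_t i = 0; i < num_tasks; i++, start_index += items_per_task) {
    if (start_index >= num_items) start_index -= num_items;
    std::printf("task %zu starts scanning at item %zu\n", i, start_index);
  }
}

With 2 items and 4 tasks, tasks 2 and 3 simply start at offsets 0 and 1 again and find nothing left to claim.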
test/unittests/heap/item-parallel-job-unittest.cc
@@ -23,18 +23,12 @@ class ItemParallelJobTest : public TestWithIsolate {
 
 namespace {
 
-class SimpleTask : public ItemParallelJob::Task {
+class EmptyTask : public ItemParallelJob::Task {
  public:
-  SimpleTask(Isolate* isolate, bool* did_run)
+  explicit EmptyTask(Isolate* isolate, bool* did_run)
       : ItemParallelJob::Task(isolate), did_run_(did_run) {}
 
-  void RunInParallel() override {
-    ItemParallelJob::Item* item = nullptr;
-    while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
-      item->MarkFinished();
-    }
-    *did_run_ = true;
-  }
+  void RunInParallel() override { *did_run_ = true; }
 
  private:
   bool* did_run_;
@@ -154,54 +148,16 @@ class ItemB : public BaseItem {
 
 }  // namespace
 
-// TODO(gab): Figure out why skipping the main task when there are 0 work items
-// results in DCHECKs, https://crbug.com/806237.
-TEST_F(ItemParallelJobTest, DISABLED_SimpleTaskWithNoItemsDoesntRun) {
+TEST_F(ItemParallelJobTest, EmptyTaskRuns) {
   bool did_run = false;
   ItemParallelJob job(i_isolate()->cancelable_task_manager(),
                       parallel_job_semaphore());
-  job.AddTask(new SimpleTask(i_isolate(), &did_run));
-
-  job.Run();
-  EXPECT_FALSE(did_run);
-}
-
-TEST_F(ItemParallelJobTest, SimpleTaskRuns) {
-  bool did_run = false;
-  ItemParallelJob job(i_isolate()->cancelable_task_manager(),
-                      parallel_job_semaphore());
-  job.AddTask(new SimpleTask(i_isolate(), &did_run));
-
-  // |job| needs at least one work item or it will not spawn the SimpleTask.
-  job.AddItem(new ItemParallelJob::Item);
-
+  job.AddTask(new EmptyTask(i_isolate(), &did_run));
   job.Run();
   EXPECT_TRUE(did_run);
 }
 
-TEST_F(ItemParallelJobTest, DontSpawnMoreTasksThanItems) {
-  bool did_run1 = false;
-  bool did_run2 = false;
-  bool did_run3 = false;
-  ItemParallelJob job(i_isolate()->cancelable_task_manager(),
-                      parallel_job_semaphore());
-  job.AddTask(new SimpleTask(i_isolate(), &did_run1));
-  job.AddTask(new SimpleTask(i_isolate(), &did_run2));
-  job.AddTask(new SimpleTask(i_isolate(), &did_run3));
-
-  // Schedule only two work items.
-  job.AddItem(new ItemParallelJob::Item);
-  job.AddItem(new ItemParallelJob::Item);
-
-  job.Run();
-  // Only the first two tasks should have been scheduled. Neither is guaranteed
-  // to run though as either may complete the full workload before the other
-  // starts.
-  EXPECT_TRUE(did_run1 || did_run2);
-  EXPECT_FALSE(did_run3);
-}
-
-TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
+TEST_F(ItemParallelJobTest, FinishAllItems) {
   const int kItems = 111;
   bool was_processed[kItems];
   for (int i = 0; i < kItems; i++) {
@@ -219,27 +175,6 @@ TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
   }
 }
 
-TEST_F(ItemParallelJobTest, ParallelProcessing) {
-  const int kItems = 111;
-  bool was_processed[kItems];
-  for (int i = 0; i < kItems; i++) {
-    was_processed[i] = false;
-  }
-  ItemParallelJob job(i_isolate()->cancelable_task_manager(),
-                      parallel_job_semaphore());
-  job.AddTask(new EagerTask(i_isolate()));
-  job.AddTask(new EagerTask(i_isolate()));
-  job.AddTask(new EagerTask(i_isolate()));
-  job.AddTask(new EagerTask(i_isolate()));
-  for (int i = 0; i < kItems; i++) {
-    job.AddItem(new SimpleItem(&was_processed[i]));
-  }
-  job.Run();
-  for (int i = 0; i < kItems; i++) {
-    EXPECT_TRUE(was_processed[i]);
-  }
-}
-
 TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
   const int kItemsAndTasks = 2;  // Main thread + additional task.
   bool was_processed[kItemsAndTasks];