fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 1 | // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 5 | #include "base/task_scheduler/scheduler_worker_pool_impl.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 6 | |
| 7 | #include <stddef.h> |
| 8 | |
| 9 | #include <memory> |
| 10 | #include <unordered_set> |
| 11 | #include <vector> |
| 12 | |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 13 | #include "base/atomicops.h" |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 14 | #include "base/barrier_closure.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 15 | #include "base/bind.h" |
| 16 | #include "base/bind_helpers.h" |
fdoray | dace22d | 2016-04-29 04:35:47 +0900 | [diff] [blame] | 17 | #include "base/callback.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 18 | #include "base/macros.h" |
| 19 | #include "base/memory/ptr_util.h" |
| 20 | #include "base/memory/ref_counted.h" |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 21 | #include "base/metrics/histogram.h" |
| 22 | #include "base/metrics/histogram_samples.h" |
| 23 | #include "base/metrics/statistics_recorder.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 24 | #include "base/synchronization/condition_variable.h" |
| 25 | #include "base/synchronization/lock.h" |
| 26 | #include "base/synchronization/waitable_event.h" |
| 27 | #include "base/task_runner.h" |
fdoray | c2c7499 | 2016-04-20 10:39:21 +0900 | [diff] [blame] | 28 | #include "base/task_scheduler/delayed_task_manager.h" |
robliao | df2e154 | 2016-07-21 06:46:52 +0900 | [diff] [blame] | 29 | #include "base/task_scheduler/scheduler_worker_pool_params.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 30 | #include "base/task_scheduler/sequence.h" |
| 31 | #include "base/task_scheduler/sequence_sort_key.h" |
| 32 | #include "base/task_scheduler/task_tracker.h" |
fdoray | 570633b | 2016-04-26 01:24:46 +0900 | [diff] [blame] | 33 | #include "base/task_scheduler/test_task_factory.h" |
fdoray | 9c56ea3 | 2016-11-02 23:35:26 +0900 | [diff] [blame] | 34 | #include "base/task_scheduler/test_utils.h" |
gab | bcf9c76 | 2016-08-02 01:39:56 +0900 | [diff] [blame] | 35 | #include "base/test/gtest_util.h" |
fdoray | a660091 | 2016-10-15 06:40:37 +0900 | [diff] [blame] | 36 | #include "base/test/test_simple_task_runner.h" |
| 37 | #include "base/test/test_timeouts.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 38 | #include "base/threading/platform_thread.h" |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 39 | #include "base/threading/scoped_blocking_call.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 40 | #include "base/threading/simple_thread.h" |
fdoray | a660091 | 2016-10-15 06:40:37 +0900 | [diff] [blame] | 41 | #include "base/threading/thread.h" |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 42 | #include "base/threading/thread_checker_impl.h" |
| 43 | #include "base/threading/thread_local_storage.h" |
fdoray | 4292526 | 2016-04-29 06:36:33 +0900 | [diff] [blame] | 44 | #include "base/threading/thread_restrictions.h" |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 45 | #include "base/time/time.h" |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 46 | #include "testing/gtest/include/gtest/gtest.h" |
| 47 | |
| 48 | namespace base { |
| 49 | namespace internal { |
| 50 | namespace { |
| 51 | |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 52 | constexpr size_t kNumWorkersInWorkerPool = 4; |
| 53 | constexpr size_t kNumThreadsPostingTasks = 4; |
| 54 | constexpr size_t kNumTasksPostedPerThread = 150; |
| 55 | // This can't be lower because Windows' WaitableEvent wakes up too early when a |
| 56 | // small timeout is used. This results in many spurious wake ups before a worker |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 57 | // is allowed to cleanup. |
| 58 | constexpr TimeDelta kReclaimTimeForCleanupTests = |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 59 | TimeDelta::FromMilliseconds(500); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 60 | constexpr TimeDelta kExtraTimeToWaitForCleanup = TimeDelta::FromSeconds(1); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 61 | |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 62 | class TaskSchedulerWorkerPoolImplTestBase { |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 63 | protected: |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 64 | TaskSchedulerWorkerPoolImplTestBase() |
| 65 | : service_thread_("TaskSchedulerServiceThread"){}; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 66 | |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 67 | void SetUp() { |
fdoray | 2890d2f | 2017-04-08 09:51:58 +0900 | [diff] [blame] | 68 | CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 69 | } |
| 70 | |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 71 | void TearDown() { |
fdoray | a660091 | 2016-10-15 06:40:37 +0900 | [diff] [blame] | 72 | service_thread_.Stop(); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 73 | task_tracker_.Flush(); |
robliao | cf19f74 | 2016-06-23 03:36:41 +0900 | [diff] [blame] | 74 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 75 | worker_pool_->JoinForTesting(); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 76 | } |
| 77 | |
fdoray | 2890d2f | 2017-04-08 09:51:58 +0900 | [diff] [blame] | 78 | void CreateWorkerPool() { |
fdoray | a660091 | 2016-10-15 06:40:37 +0900 | [diff] [blame] | 79 | ASSERT_FALSE(worker_pool_); |
fdoray | a660091 | 2016-10-15 06:40:37 +0900 | [diff] [blame] | 80 | service_thread_.Start(); |
fdoray | 4a475d6 | 2017-04-20 22:13:11 +0900 | [diff] [blame] | 81 | delayed_task_manager_.Start(service_thread_.task_runner()); |
Jeremy Roman | cd0c467 | 2017-08-17 08:27:24 +0900 | [diff] [blame] | 82 | worker_pool_ = std::make_unique<SchedulerWorkerPoolImpl>( |
| 83 | "TestWorkerPool", ThreadPriority::NORMAL, &task_tracker_, |
| 84 | &delayed_task_manager_); |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 85 | ASSERT_TRUE(worker_pool_); |
| 86 | } |
| 87 | |
fdoray | 2890d2f | 2017-04-08 09:51:58 +0900 | [diff] [blame] | 88 | void StartWorkerPool(TimeDelta suggested_reclaim_time, size_t num_workers) { |
| 89 | ASSERT_TRUE(worker_pool_); |
Jeffrey He | 997f449 | 2017-07-27 07:44:45 +0900 | [diff] [blame] | 90 | worker_pool_->Start( |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 91 | SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time), |
| 92 | service_thread_.task_runner()); |
fdoray | 2890d2f | 2017-04-08 09:51:58 +0900 | [diff] [blame] | 93 | } |
| 94 | |
| 95 | void CreateAndStartWorkerPool(TimeDelta suggested_reclaim_time, |
| 96 | size_t num_workers) { |
| 97 | CreateWorkerPool(); |
| 98 | StartWorkerPool(suggested_reclaim_time, num_workers); |
| 99 | } |
| 100 | |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 101 | std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 102 | |
| 103 | TaskTracker task_tracker_; |
fdoray | a660091 | 2016-10-15 06:40:37 +0900 | [diff] [blame] | 104 | Thread service_thread_; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 105 | |
fdoray | 9b0b233 | 2016-04-26 06:34:33 +0900 | [diff] [blame] | 106 | private: |
fdoray | 4a475d6 | 2017-04-20 22:13:11 +0900 | [diff] [blame] | 107 | DelayedTaskManager delayed_task_manager_; |
| 108 | |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 109 | DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestBase); |
| 110 | }; |
| 111 | |
// Non-parameterized gtest fixture: simply adapts the shared base fixture to
// the testing::Test SetUp()/TearDown() overrides.
class TaskSchedulerWorkerPoolImplTest
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::Test {
 protected:
  TaskSchedulerWorkerPoolImplTest() = default;

  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::SetUp(); }

  void TearDown() override { TaskSchedulerWorkerPoolImplTestBase::TearDown(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTest);
};
| 125 | |
// Parameterized fixture: runs each TEST_P once per test::ExecutionMode
// (see the INSTANTIATE_TEST_CASE_P calls below).
class TaskSchedulerWorkerPoolImplTestParam
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::TestWithParam<test::ExecutionMode> {
 protected:
  TaskSchedulerWorkerPoolImplTestParam() = default;

  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::SetUp(); }

  void TearDown() override { TaskSchedulerWorkerPoolImplTestBase::TearDown(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestParam);
};
| 139 | |
fdoray | 570633b | 2016-04-26 01:24:46 +0900 | [diff] [blame] | 140 | using PostNestedTask = test::TestTaskFactory::PostNestedTask; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 141 | |
Jeffrey He | 68d29bc | 2017-08-24 02:14:16 +0900 | [diff] [blame] | 142 | class ThreadPostingTasksWaitIdle : public SimpleThread { |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 143 | public: |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 144 | // Constructs a thread that posts tasks to |worker_pool| through an |
Jeffrey He | 68d29bc | 2017-08-24 02:14:16 +0900 | [diff] [blame] | 145 | // |execution_mode| task runner. The thread waits until all workers in |
| 146 | // |worker_pool| are idle before posting a new task. |
| 147 | ThreadPostingTasksWaitIdle(SchedulerWorkerPoolImpl* worker_pool, |
| 148 | test::ExecutionMode execution_mode) |
| 149 | : SimpleThread("ThreadPostingTasksWaitIdle"), |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 150 | worker_pool_(worker_pool), |
fdoray | 9c56ea3 | 2016-11-02 23:35:26 +0900 | [diff] [blame] | 151 | factory_(CreateTaskRunnerWithExecutionMode(worker_pool, execution_mode), |
fdoray | 570633b | 2016-04-26 01:24:46 +0900 | [diff] [blame] | 152 | execution_mode) { |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 153 | DCHECK(worker_pool_); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 154 | } |
| 155 | |
fdoray | 570633b | 2016-04-26 01:24:46 +0900 | [diff] [blame] | 156 | const test::TestTaskFactory* factory() const { return &factory_; } |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 157 | |
| 158 | private: |
| 159 | void Run() override { |
Yeol | 0d4f3eb | 2017-07-26 02:09:10 +0900 | [diff] [blame] | 160 | EXPECT_FALSE(factory_.task_runner()->RunsTasksInCurrentSequence()); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 161 | |
| 162 | for (size_t i = 0; i < kNumTasksPostedPerThread; ++i) { |
Jeffrey He | 68d29bc | 2017-08-24 02:14:16 +0900 | [diff] [blame] | 163 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
| 164 | EXPECT_TRUE(factory_.PostTask(PostNestedTask::NO, Closure())); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 165 | } |
| 166 | } |
| 167 | |
robliao | 7ac34ba | 2016-06-23 03:16:25 +0900 | [diff] [blame] | 168 | SchedulerWorkerPoolImpl* const worker_pool_; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 169 | const scoped_refptr<TaskRunner> task_runner_; |
fdoray | 570633b | 2016-04-26 01:24:46 +0900 | [diff] [blame] | 170 | test::TestTaskFactory factory_; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 171 | |
Jeffrey He | 68d29bc | 2017-08-24 02:14:16 +0900 | [diff] [blame] | 172 | DISALLOW_COPY_AND_ASSIGN(ThreadPostingTasksWaitIdle); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 173 | }; |
| 174 | |
fdoray | 9b0b233 | 2016-04-26 06:34:33 +0900 | [diff] [blame] | 175 | } // namespace |
| 176 | |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 177 | TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWaitAllWorkersIdle) { |
robliao | cf19f74 | 2016-06-23 03:36:41 +0900 | [diff] [blame] | 178 | // Create threads to post tasks. To verify that workers can sleep and be woken |
| 179 | // up when new tasks are posted, wait for all workers to become idle before |
| 180 | // posting a new task. |
Jeffrey He | 68d29bc | 2017-08-24 02:14:16 +0900 | [diff] [blame] | 181 | std::vector<std::unique_ptr<ThreadPostingTasksWaitIdle>> |
| 182 | threads_posting_tasks; |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 183 | for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) { |
Jeffrey He | 68d29bc | 2017-08-24 02:14:16 +0900 | [diff] [blame] | 184 | threads_posting_tasks.push_back( |
| 185 | MakeUnique<ThreadPostingTasksWaitIdle>(worker_pool_.get(), GetParam())); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 186 | threads_posting_tasks.back()->Start(); |
| 187 | } |
| 188 | |
| 189 | // Wait for all tasks to run. |
| 190 | for (const auto& thread_posting_tasks : threads_posting_tasks) { |
| 191 | thread_posting_tasks->Join(); |
| 192 | thread_posting_tasks->factory()->WaitForAllTasksToRun(); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 193 | } |
| 194 | |
robliao | cf19f74 | 2016-06-23 03:36:41 +0900 | [diff] [blame] | 195 | // Wait until all workers are idle to be sure that no task accesses its |
| 196 | // TestTaskFactory after |thread_posting_tasks| is destroyed. |
| 197 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 198 | } |
| 199 | |
// Verify that tasks still run promptly when all workers but one are blocked.
TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWithOneAvailableWorker) {
  // Post blocking tasks to keep all workers busy except one until |event| is
  // signaled. Use different factories so that tasks are added to different
  // sequences and can run simultaneously when the execution mode is SEQUENCED.
  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                      WaitableEvent::InitialState::NOT_SIGNALED);
  std::vector<std::unique_ptr<test::TestTaskFactory>> blocked_task_factories;
  for (size_t i = 0; i < (kNumWorkersInWorkerPool - 1); ++i) {
    blocked_task_factories.push_back(std::make_unique<test::TestTaskFactory>(
        CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
        GetParam()));
    EXPECT_TRUE(blocked_task_factories.back()->PostTask(
        PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
    // Ensure the blocking task has actually started before posting the next
    // one, so each one occupies a distinct worker.
    blocked_task_factories.back()->WaitForAllTasksToRun();
  }

  // Post |kNumTasksPostedPerThread| tasks that should all run despite the fact
  // that only one worker in |worker_pool_| isn't busy.
  test::TestTaskFactory short_task_factory(
      CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
      GetParam());
  for (size_t i = 0; i < kNumTasksPostedPerThread; ++i)
    EXPECT_TRUE(short_task_factory.PostTask(PostNestedTask::NO, Closure()));
  short_task_factory.WaitForAllTasksToRun();

  // Release tasks waiting on |event|.
  event.Signal();

  // Wait until all workers are idle to be sure that no task accesses
  // its TestTaskFactory after it is destroyed.
  worker_pool_->WaitForAllWorkersIdleForTesting();
}
| 232 | |
TEST_P(TaskSchedulerWorkerPoolImplTestParam, Saturate) {
  // Verify that it is possible to have |kNumWorkersInWorkerPool|
  // tasks/sequences running simultaneously. Use different factories so that the
  // blocking tasks are added to different sequences and can run simultaneously
  // when the execution mode is SEQUENCED.
  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                      WaitableEvent::InitialState::NOT_SIGNALED);
  std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    factories.push_back(std::make_unique<test::TestTaskFactory>(
        CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
        GetParam()));
    EXPECT_TRUE(factories.back()->PostTask(
        PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
    // Waiting here proves each blocking task got its own concurrent worker.
    factories.back()->WaitForAllTasksToRun();
  }

  // Release tasks waiting on |event|.
  event.Signal();

  // Wait until all workers are idle to be sure that no task accesses
  // its TestTaskFactory after it is destroyed.
  worker_pool_->WaitForAllWorkersIdleForTesting();
}
| 257 | |
// Run every TEST_P above once with parallel task runners and once with
// sequenced task runners.
INSTANTIATE_TEST_CASE_P(Parallel,
                        TaskSchedulerWorkerPoolImplTestParam,
                        ::testing::Values(test::ExecutionMode::PARALLEL));
INSTANTIATE_TEST_CASE_P(Sequenced,
                        TaskSchedulerWorkerPoolImplTestParam,
                        ::testing::Values(test::ExecutionMode::SEQUENCED));
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 264 | |
| 265 | namespace { |
| 266 | |
// Fixture that creates the worker pool but deliberately does not start it, so
// each test can post tasks first and call StartWorkerPool() itself.
class TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest
    : public TaskSchedulerWorkerPoolImplTest {
 public:
  void SetUp() override {
    CreateWorkerPool();
    // Let the test start the worker pool.
  }
};
| 275 | |
| 276 | void TaskPostedBeforeStart(PlatformThreadRef* platform_thread_ref, |
| 277 | WaitableEvent* task_scheduled, |
| 278 | WaitableEvent* barrier) { |
| 279 | *platform_thread_ref = PlatformThread::CurrentRef(); |
| 280 | task_scheduled->Signal(); |
| 281 | barrier->Wait(); |
| 282 | } |
| 283 | |
| 284 | } // namespace |
| 285 | |
// Verify that 2 tasks posted before Start() to a SchedulerWorkerPoolImpl with
// more than 2 workers are scheduled on different workers when Start() is
// called.
TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
       PostTasksBeforeStart) {
  PlatformThreadRef task_1_thread_ref;
  PlatformThreadRef task_2_thread_ref;
  WaitableEvent task_1_scheduled(WaitableEvent::ResetPolicy::MANUAL,
                                 WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent task_2_scheduled(WaitableEvent::ResetPolicy::MANUAL,
                                 WaitableEvent::InitialState::NOT_SIGNALED);

  // This event is used to prevent a task from completing before the other task
  // is scheduled. If that happened, both tasks could run on the same worker and
  // this test couldn't verify that the correct number of workers were woken up.
  WaitableEvent barrier(WaitableEvent::ResetPolicy::MANUAL,
                        WaitableEvent::InitialState::NOT_SIGNALED);

  worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
      ->PostTask(
          FROM_HERE,
          BindOnce(&TaskPostedBeforeStart, Unretained(&task_1_thread_ref),
                   Unretained(&task_1_scheduled), Unretained(&barrier)));
  worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
      ->PostTask(
          FROM_HERE,
          BindOnce(&TaskPostedBeforeStart, Unretained(&task_2_thread_ref),
                   Unretained(&task_2_scheduled), Unretained(&barrier)));

  // Workers should not be created and tasks should not run before the pool is
  // started.
  EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());
  EXPECT_FALSE(task_1_scheduled.IsSignaled());
  EXPECT_FALSE(task_2_scheduled.IsSignaled());

  StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);

  // Tasks should be scheduled shortly after the pool is started.
  task_1_scheduled.Wait();
  task_2_scheduled.Wait();

  // Tasks should be scheduled on different threads.
  EXPECT_NE(task_1_thread_ref, task_2_thread_ref);

  // Unblock both tasks and wait for them to finish before the WaitableEvents
  // they reference go out of scope.
  barrier.Signal();
  task_tracker_.Flush();
}
| 333 | |
// Verify that posting many tasks before Start will cause the number of workers
// to grow to |worker_capacity_| during Start.
TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest, PostManyTasks) {
  scoped_refptr<TaskRunner> task_runner =
      worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
  // Post more tasks than the pool's capacity so the pool is fully saturated.
  constexpr size_t kNumTasksPosted = 2 * kNumWorkersInWorkerPool;
  for (size_t i = 0; i < kNumTasksPosted; ++i)
    task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));

  // No workers may exist before Start().
  EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());

  StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
  // Sanity check: the posted task count must exceed capacity for this test to
  // be meaningful.
  ASSERT_GT(kNumTasksPosted, worker_pool_->GetWorkerCapacityForTesting());
  EXPECT_EQ(kNumWorkersInWorkerPool,
            worker_pool_->GetWorkerCapacityForTesting());

  // The backlog should have forced worker creation up to full capacity.
  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
            worker_pool_->GetWorkerCapacityForTesting());
}
| 353 | |
fdoray | 2890d2f | 2017-04-08 09:51:58 +0900 | [diff] [blame] | 354 | namespace { |
| 355 | |
// Sentinel stored in each worker thread's TLS slot to mark it as "seen".
constexpr size_t kMagicTlsValue = 42;

// Fixture whose tasks tag worker threads with a magic TLS value, used to
// detect whether a worker thread was replaced (cleaned up) rather than reused.
class TaskSchedulerWorkerPoolCheckTlsReuse
    : public TaskSchedulerWorkerPoolImplTest {
 public:
  // Marks the current worker thread with |kMagicTlsValue| and blocks on
  // |waiter_| to keep the worker busy.
  void SetTlsValueAndWait() {
    slot_.Set(reinterpret_cast<void*>(kMagicTlsValue));
    waiter_.Wait();
  }

  // Increments |zero_tls_values_| if this worker thread was never tagged
  // (i.e. it is a fresh worker), signals |count_waiter|, then blocks on
  // |waiter_|.
  void CountZeroTlsValuesAndWait(WaitableEvent* count_waiter) {
    if (!slot_.Get())
      subtle::NoBarrier_AtomicIncrement(&zero_tls_values_, 1);

    count_waiter->Signal();
    waiter_.Wait();
  }

 protected:
  TaskSchedulerWorkerPoolCheckTlsReuse() :
      waiter_(WaitableEvent::ResetPolicy::MANUAL,
              WaitableEvent::InitialState::NOT_SIGNALED) {}

  // Use a short reclaim time so workers are actually cleaned up during the
  // test.
  void SetUp() override {
    CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
                             kNumWorkersInWorkerPool);
  }

  // Number of workers observed without the magic TLS value.
  subtle::Atomic32 zero_tls_values_ = 0;

  WaitableEvent waiter_;

 private:
  ThreadLocalStorage::Slot slot_;

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolCheckTlsReuse);
};
| 393 | |
| 394 | } // namespace |
| 395 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 396 | // Checks that at least one worker has been cleaned up by checking the TLS. |
| 397 | TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckCleanupWorkers) { |
| 398 | // Saturate the workers and mark each worker's thread with a magic TLS value. |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 399 | std::vector<std::unique_ptr<test::TestTaskFactory>> factories; |
| 400 | for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
Jeremy Roman | cd0c467 | 2017-08-17 08:27:24 +0900 | [diff] [blame] | 401 | factories.push_back(std::make_unique<test::TestTaskFactory>( |
fdoray | b701340 | 2017-05-09 13:18:32 +0900 | [diff] [blame] | 402 | worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}), |
fdoray | 9c56ea3 | 2016-11-02 23:35:26 +0900 | [diff] [blame] | 403 | test::ExecutionMode::PARALLEL)); |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 404 | ASSERT_TRUE(factories.back()->PostTask( |
| 405 | PostNestedTask::NO, |
| 406 | Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::SetTlsValueAndWait, |
| 407 | Unretained(this)))); |
| 408 | factories.back()->WaitForAllTasksToRun(); |
| 409 | } |
| 410 | |
| 411 | // Release tasks waiting on |waiter_|. |
| 412 | waiter_.Signal(); |
| 413 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
| 414 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 415 | // All workers should be done running by now, so reset for the next phase. |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 416 | waiter_.Reset(); |
| 417 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 418 | // Give the worker pool a chance to cleanup its workers. |
| 419 | PlatformThread::Sleep(kReclaimTimeForCleanupTests + |
| 420 | kExtraTimeToWaitForCleanup); |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 421 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 422 | worker_pool_->DisallowWorkerCleanupForTesting(); |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 423 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 424 | // Saturate and count the worker threads that do not have the magic TLS value. |
| 425 | // If the value is not there, that means we're at a new worker. |
robliao | 2e95175 | 2016-07-23 03:12:18 +0900 | [diff] [blame] | 426 | std::vector<std::unique_ptr<WaitableEvent>> count_waiters; |
| 427 | for (auto& factory : factories) { |
| 428 | count_waiters.push_back(WrapUnique(new WaitableEvent( |
| 429 | WaitableEvent::ResetPolicy::MANUAL, |
| 430 | WaitableEvent::InitialState::NOT_SIGNALED))); |
| 431 | ASSERT_TRUE(factory->PostTask( |
| 432 | PostNestedTask::NO, |
| 433 | Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::CountZeroTlsValuesAndWait, |
| 434 | Unretained(this), |
| 435 | count_waiters.back().get()))); |
| 436 | factory->WaitForAllTasksToRun(); |
| 437 | } |
| 438 | |
| 439 | // Wait for all counters to complete. |
| 440 | for (auto& count_waiter : count_waiters) |
| 441 | count_waiter->Wait(); |
| 442 | |
| 443 | EXPECT_GT(subtle::NoBarrier_Load(&zero_tls_values_), 0); |
| 444 | |
| 445 | // Release tasks waiting on |waiter_|. |
| 446 | waiter_.Signal(); |
| 447 | } |
| 448 | |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 449 | namespace { |
| 450 | |
// Fixture for histogram tests. Uses a temporary StatisticsRecorder so that
// histogram samples recorded by the pool don't leak between tests.
class TaskSchedulerWorkerPoolHistogramTest
    : public TaskSchedulerWorkerPoolImplTest {
 public:
  TaskSchedulerWorkerPoolHistogramTest() = default;

 protected:
  // Override SetUp() to allow every test case to initialize a worker pool with
  // its own arguments.
  void SetUp() override {}

 private:
  std::unique_ptr<StatisticsRecorder> statistics_recorder_ =
      StatisticsRecorder::CreateTemporaryForTesting();

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolHistogramTest);
};
| 467 | |
| 468 | } // namespace |
| 469 | |
// Verify that the TaskScheduler.NumTasksBetweenWaits histogram records the
// number of tasks a worker ran between two sleeps.
TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                      WaitableEvent::InitialState::NOT_SIGNALED);
  CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
  // A sequenced runner guarantees all three tasks run on the same worker.
  auto task_runner = worker_pool_->CreateSequencedTaskRunnerWithTraits(
      {WithBaseSyncPrimitives()});

  // Post a task.
  task_runner->PostTask(FROM_HERE,
                        BindOnce(&WaitableEvent::Wait, Unretained(&event)));

  // Post 2 more tasks while the first task hasn't completed its execution. It
  // is guaranteed that these tasks will run immediately after the first task,
  // without allowing the worker to sleep.
  task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));
  task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));

  // Allow tasks to run and wait until the SchedulerWorker is idle.
  event.Signal();
  worker_pool_->WaitForAllWorkersIdleForTesting();

  // Wake up the SchedulerWorker that just became idle by posting a task and
  // wait until it becomes idle again. The SchedulerWorker should record the
  // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up.
  task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));
  worker_pool_->WaitForAllWorkersIdleForTesting();

  // Verify that counts were recorded to the histogram as expected: exactly one
  // sample of 3 (the three tasks run back-to-back), no samples of 0 or 10.
  const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
  EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
}
| 503 | |
| 504 | namespace { |
| 505 | |
| 506 | void SignalAndWaitEvent(WaitableEvent* signal_event, |
| 507 | WaitableEvent* wait_event) { |
| 508 | signal_event->Signal(); |
| 509 | wait_event->Wait(); |
| 510 | } |
| 511 | |
| 512 | } // namespace |
| 513 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 514 | TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithCleanup) { |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 515 | WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL, |
| 516 | WaitableEvent::InitialState::NOT_SIGNALED); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 517 | CreateAndStartWorkerPool(kReclaimTimeForCleanupTests, |
| 518 | kNumWorkersInWorkerPool); |
fdoray | b701340 | 2017-05-09 13:18:32 +0900 | [diff] [blame] | 519 | auto task_runner = |
| 520 | worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 521 | |
| 522 | // Post tasks to saturate the pool. |
| 523 | std::vector<std::unique_ptr<WaitableEvent>> task_started_events; |
| 524 | for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
Jeremy Roman | cd0c467 | 2017-08-17 08:27:24 +0900 | [diff] [blame] | 525 | task_started_events.push_back(std::make_unique<WaitableEvent>( |
| 526 | WaitableEvent::ResetPolicy::MANUAL, |
| 527 | WaitableEvent::InitialState::NOT_SIGNALED)); |
tzik | 6bdbeb2 | 2017-04-12 00:00:44 +0900 | [diff] [blame] | 528 | task_runner->PostTask(FROM_HERE, |
| 529 | BindOnce(&SignalAndWaitEvent, |
| 530 | Unretained(task_started_events.back().get()), |
| 531 | Unretained(&tasks_can_exit_event))); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 532 | } |
| 533 | for (const auto& task_started_event : task_started_events) |
| 534 | task_started_event->Wait(); |
| 535 | |
| 536 | // Allow tasks to complete their execution and wait to allow workers to |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 537 | // cleanup. |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 538 | tasks_can_exit_event.Signal(); |
| 539 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 540 | PlatformThread::Sleep(kReclaimTimeForCleanupTests + |
| 541 | kExtraTimeToWaitForCleanup); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 542 | |
| 543 | // Wake up SchedulerWorkers by posting tasks. They should record the |
| 544 | // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up. |
| 545 | tasks_can_exit_event.Reset(); |
| 546 | task_started_events.clear(); |
| 547 | for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
Jeremy Roman | cd0c467 | 2017-08-17 08:27:24 +0900 | [diff] [blame] | 548 | task_started_events.push_back(std::make_unique<WaitableEvent>( |
| 549 | WaitableEvent::ResetPolicy::MANUAL, |
| 550 | WaitableEvent::InitialState::NOT_SIGNALED)); |
tzik | 6bdbeb2 | 2017-04-12 00:00:44 +0900 | [diff] [blame] | 551 | task_runner->PostTask(FROM_HERE, |
| 552 | BindOnce(&SignalAndWaitEvent, |
| 553 | Unretained(task_started_events.back().get()), |
| 554 | Unretained(&tasks_can_exit_event))); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 555 | } |
| 556 | for (const auto& task_started_event : task_started_events) |
| 557 | task_started_event->Wait(); |
| 558 | |
fdoray | fd7279f | 2016-10-14 10:30:36 +0900 | [diff] [blame] | 559 | const auto* histogram = worker_pool_->num_tasks_between_waits_histogram(); |
| 560 | |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 561 | // Verify that counts were recorded to the histogram as expected. |
| 562 | // - The "0" bucket has a count of at least 1 because the SchedulerWorker on |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 563 | // top of the idle stack isn't allowed to cleanup when its sleep timeout |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 564 | // expires. Instead, it waits on its WaitableEvent again without running a |
| 565 | // task. The count may be higher than 1 because of spurious wake ups before |
| 566 | // the sleep timeout expires. |
fdoray | fd7279f | 2016-10-14 10:30:36 +0900 | [diff] [blame] | 567 | EXPECT_GE(histogram->SnapshotSamples()->GetCount(0), 1); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 568 | // - The "1" bucket has a count of |kNumWorkersInWorkerPool| because each |
| 569 | // SchedulerWorker ran a task before waiting on its WaitableEvent at the |
| 570 | // beginning of the test. |
| 571 | EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool), |
fdoray | fd7279f | 2016-10-14 10:30:36 +0900 | [diff] [blame] | 572 | histogram->SnapshotSamples()->GetCount(1)); |
| 573 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10)); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 574 | |
| 575 | tasks_can_exit_event.Signal(); |
| 576 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 577 | worker_pool_->DisallowWorkerCleanupForTesting(); |
fdoray | 4b83678 | 2016-09-28 05:44:25 +0900 | [diff] [blame] | 578 | } |
| 579 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 580 | TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) { |
| 581 | CreateAndStartWorkerPool(kReclaimTimeForCleanupTests, |
| 582 | kNumWorkersInWorkerPool); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 583 | |
| 584 | auto histogrammed_thread_task_runner = |
| 585 | worker_pool_->CreateSequencedTaskRunnerWithTraits( |
fdoray | b701340 | 2017-05-09 13:18:32 +0900 | [diff] [blame] | 586 | {WithBaseSyncPrimitives()}); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 587 | |
| 588 | // Post 3 tasks and hold the thread for idle thread stack ordering. |
| 589 | // This test assumes |histogrammed_thread_task_runner| gets assigned the same |
| 590 | // thread for each of its tasks. |
| 591 | PlatformThreadRef thread_ref; |
| 592 | histogrammed_thread_task_runner->PostTask( |
tzik | 6bdbeb2 | 2017-04-12 00:00:44 +0900 | [diff] [blame] | 593 | FROM_HERE, BindOnce( |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 594 | [](PlatformThreadRef* thread_ref) { |
| 595 | ASSERT_TRUE(thread_ref); |
| 596 | *thread_ref = PlatformThread::CurrentRef(); |
| 597 | }, |
| 598 | Unretained(&thread_ref))); |
| 599 | histogrammed_thread_task_runner->PostTask( |
tzik | 6bdbeb2 | 2017-04-12 00:00:44 +0900 | [diff] [blame] | 600 | FROM_HERE, BindOnce( |
robliao | 79bfa45 | 2017-03-14 09:30:45 +0900 | [diff] [blame] | 601 | [](PlatformThreadRef* thread_ref) { |
| 602 | ASSERT_FALSE(thread_ref->is_null()); |
| 603 | EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef()); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 604 | }, |
robliao | 79bfa45 | 2017-03-14 09:30:45 +0900 | [diff] [blame] | 605 | Unretained(&thread_ref))); |
| 606 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 607 | WaitableEvent cleanup_thread_running( |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 608 | WaitableEvent::ResetPolicy::MANUAL, |
| 609 | WaitableEvent::InitialState::NOT_SIGNALED); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 610 | WaitableEvent cleanup_thread_continue( |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 611 | WaitableEvent::ResetPolicy::MANUAL, |
| 612 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 613 | histogrammed_thread_task_runner->PostTask( |
| 614 | FROM_HERE, |
tzik | 6bdbeb2 | 2017-04-12 00:00:44 +0900 | [diff] [blame] | 615 | BindOnce( |
robliao | 79bfa45 | 2017-03-14 09:30:45 +0900 | [diff] [blame] | 616 | [](PlatformThreadRef* thread_ref, |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 617 | WaitableEvent* cleanup_thread_running, |
| 618 | WaitableEvent* cleanup_thread_continue) { |
robliao | 79bfa45 | 2017-03-14 09:30:45 +0900 | [diff] [blame] | 619 | ASSERT_FALSE(thread_ref->is_null()); |
| 620 | EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef()); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 621 | cleanup_thread_running->Signal(); |
| 622 | cleanup_thread_continue->Wait(); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 623 | }, |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 624 | Unretained(&thread_ref), Unretained(&cleanup_thread_running), |
| 625 | Unretained(&cleanup_thread_continue))); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 626 | |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 627 | cleanup_thread_running.Wait(); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 628 | |
| 629 | // To allow the SchedulerWorker associated with |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 630 | // |histogrammed_thread_task_runner| to cleanup, make sure it isn't on top of |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 631 | // the idle stack by waking up another SchedulerWorker via |
| 632 | // |task_runner_for_top_idle|. |histogrammed_thread_task_runner| should |
| 633 | // release and go idle first and then |task_runner_for_top_idle| should |
| 634 | // release and go idle. This allows the SchedulerWorker associated with |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 635 | // |histogrammed_thread_task_runner| to cleanup. |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 636 | WaitableEvent top_idle_thread_running( |
| 637 | WaitableEvent::ResetPolicy::MANUAL, |
| 638 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 639 | WaitableEvent top_idle_thread_continue( |
| 640 | WaitableEvent::ResetPolicy::MANUAL, |
| 641 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 642 | auto task_runner_for_top_idle = |
| 643 | worker_pool_->CreateSequencedTaskRunnerWithTraits( |
fdoray | b701340 | 2017-05-09 13:18:32 +0900 | [diff] [blame] | 644 | {WithBaseSyncPrimitives()}); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 645 | task_runner_for_top_idle->PostTask( |
tzik | 6bdbeb2 | 2017-04-12 00:00:44 +0900 | [diff] [blame] | 646 | FROM_HERE, BindOnce( |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 647 | [](PlatformThreadRef thread_ref, |
| 648 | WaitableEvent* top_idle_thread_running, |
| 649 | WaitableEvent* top_idle_thread_continue) { |
robliao | 79bfa45 | 2017-03-14 09:30:45 +0900 | [diff] [blame] | 650 | ASSERT_FALSE(thread_ref.is_null()); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 651 | EXPECT_NE(thread_ref, PlatformThread::CurrentRef()) |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 652 | << "Worker reused. Worker will not cleanup and the " |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 653 | "histogram value will be wrong."; |
| 654 | top_idle_thread_running->Signal(); |
| 655 | top_idle_thread_continue->Wait(); |
| 656 | }, |
| 657 | thread_ref, Unretained(&top_idle_thread_running), |
| 658 | Unretained(&top_idle_thread_continue))); |
| 659 | top_idle_thread_running.Wait(); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 660 | cleanup_thread_continue.Signal(); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 661 | // Wait for the thread processing the |histogrammed_thread_task_runner| work |
| 662 | // to go to the idle stack. |
| 663 | PlatformThread::Sleep(TestTimeouts::tiny_timeout()); |
| 664 | top_idle_thread_continue.Signal(); |
| 665 | // Allow the thread processing the |histogrammed_thread_task_runner| work to |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 666 | // cleanup. |
| 667 | PlatformThread::Sleep(kReclaimTimeForCleanupTests + |
| 668 | kReclaimTimeForCleanupTests); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 669 | worker_pool_->WaitForAllWorkersIdleForTesting(); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 670 | worker_pool_->DisallowWorkerCleanupForTesting(); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 671 | |
| 672 | // Verify that counts were recorded to the histogram as expected. |
| 673 | const auto* histogram = worker_pool_->num_tasks_before_detach_histogram(); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 674 | // Note: There'll be a thread that cleanups after running no tasks. This |
Jeffrey He | 9a7fff2 | 2017-07-28 01:11:10 +0900 | [diff] [blame] | 675 | // thread was the one created to maintain an idle thread after posting the |
| 676 | // task via |task_runner_for_top_idle|. |
| 677 | EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(0)); |
robliao | 989772a | 2017-02-27 11:41:56 +0900 | [diff] [blame] | 678 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(1)); |
| 679 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(2)); |
| 680 | EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3)); |
| 681 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(4)); |
| 682 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(5)); |
| 683 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(6)); |
| 684 | EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10)); |
| 685 | } |
| 686 | |
robliao | 8ff674c | 2016-11-18 03:33:32 +0900 | [diff] [blame] | 687 | TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) { |
| 688 | TaskTracker task_tracker; |
fdoray | 4a475d6 | 2017-04-20 22:13:11 +0900 | [diff] [blame] | 689 | DelayedTaskManager delayed_task_manager; |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 690 | scoped_refptr<TaskRunner> service_thread_task_runner = |
| 691 | MakeRefCounted<TestSimpleTaskRunner>(); |
| 692 | delayed_task_manager.Start(service_thread_task_runner); |
Jeremy Roman | cd0c467 | 2017-08-17 08:27:24 +0900 | [diff] [blame] | 693 | auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>( |
fdoray | 9b6446c | 2017-05-09 02:31:00 +0900 | [diff] [blame] | 694 | "OnePolicyWorkerPool", ThreadPriority::NORMAL, &task_tracker, |
robliao | 8ff674c | 2016-11-18 03:33:32 +0900 | [diff] [blame] | 695 | &delayed_task_manager); |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 696 | worker_pool->Start(SchedulerWorkerPoolParams(8U, TimeDelta::Max()), |
| 697 | service_thread_task_runner); |
robliao | 8ff674c | 2016-11-18 03:33:32 +0900 | [diff] [blame] | 698 | ASSERT_TRUE(worker_pool); |
Jeffrey He | 92b46b4 | 2017-08-09 00:05:01 +0900 | [diff] [blame] | 699 | EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting()); |
robliao | 8ff674c | 2016-11-18 03:33:32 +0900 | [diff] [blame] | 700 | worker_pool->JoinForTesting(); |
| 701 | } |
| 702 | |
Jeffrey He | 9a7fff2 | 2017-07-28 01:11:10 +0900 | [diff] [blame] | 703 | // Verify the SchedulerWorkerPoolImpl keeps at least one idle standby thread, |
| 704 | // capacity permitting. |
TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
  constexpr size_t worker_capacity = 3;

  TaskTracker task_tracker;
  DelayedTaskManager delayed_task_manager;
  scoped_refptr<TaskRunner> service_thread_task_runner =
      MakeRefCounted<TestSimpleTaskRunner>();
  delayed_task_manager.Start(service_thread_task_runner);
  auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
      "StandbyThreadWorkerPool", ThreadPriority::NORMAL, &task_tracker,
      &delayed_task_manager);
  // Short reclaim time so excess idle workers cleanup during the test.
  worker_pool->Start(
      SchedulerWorkerPoolParams(worker_capacity, kReclaimTimeForCleanupTests),
      service_thread_task_runner);
  ASSERT_TRUE(worker_pool);
  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());

  auto task_runner =
      worker_pool->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});

  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
                                WaitableEvent::InitialState::NOT_SIGNALED);

  // Each task signals that it's running, then blocks until the end of the
  // test, pinning one worker per posted task.
  RepeatingClosure closure = BindRepeating(
      [](WaitableEvent* thread_running, WaitableEvent* thread_continue) {
        thread_running->Signal();
        thread_continue->Wait();
      },
      Unretained(&thread_running), Unretained(&thread_continue));

  // There should be one idle thread until we reach worker capacity
  for (size_t i = 0; i < worker_capacity; ++i) {
    EXPECT_EQ(i + 1, worker_pool->NumberOfWorkersForTesting());
    task_runner->PostTask(FROM_HERE, closure);
    thread_running.Wait();
  }

  // There should not be an extra idle thread if it means going above capacity
  EXPECT_EQ(worker_capacity, worker_pool->NumberOfWorkersForTesting());

  thread_continue.Signal();
  // Give time for a worker to cleanup. Verify that the pool attempts to keep
  // one idle active worker.
  PlatformThread::Sleep(kReclaimTimeForCleanupTests +
                        kExtraTimeToWaitForCleanup);
  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());

  worker_pool->DisallowWorkerCleanupForTesting();
  worker_pool->JoinForTesting();
}
| 757 | |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 758 | namespace { |
| 759 | |
// Whether (and with which BlockingType) a second, nested ScopedBlockingCall is
// instantiated inside the first one by NestedScopedBlockingCall.
enum class OptionalBlockingType {
  NO_BLOCK,
  MAY_BLOCK,
  WILL_BLOCK,
};
| 765 | |
// Describes a pair of (possibly nested) ScopedBlockingCalls and the
// BlockingType the combination is expected to behave as.
struct NestedBlockingType {
  NestedBlockingType(BlockingType first_in,
                     OptionalBlockingType second_in,
                     BlockingType behaves_as_in)
      : first(first_in), second(second_in), behaves_as(behaves_as_in) {}

  // BlockingType of the outer ScopedBlockingCall.
  BlockingType first;
  // Optional nested ScopedBlockingCall instantiated inside the first one.
  OptionalBlockingType second;
  // Expected effective BlockingType of the combination above.
  BlockingType behaves_as;
};
| 776 | |
| 777 | class NestedScopedBlockingCall { |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 778 | public: |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 779 | NestedScopedBlockingCall(const NestedBlockingType& nested_blocking_type) |
| 780 | : first_scoped_blocking_call_(nested_blocking_type.first), |
| 781 | second_scoped_blocking_call_( |
| 782 | nested_blocking_type.second == OptionalBlockingType::WILL_BLOCK |
| 783 | ? std::make_unique<ScopedBlockingCall>(BlockingType::WILL_BLOCK) |
| 784 | : (nested_blocking_type.second == |
| 785 | OptionalBlockingType::MAY_BLOCK |
| 786 | ? std::make_unique<ScopedBlockingCall>( |
| 787 | BlockingType::MAY_BLOCK) |
| 788 | : nullptr)) {} |
| 789 | |
| 790 | private: |
| 791 | ScopedBlockingCall first_scoped_blocking_call_; |
| 792 | std::unique_ptr<ScopedBlockingCall> second_scoped_blocking_call_; |
| 793 | |
| 794 | DISALLOW_COPY_AND_ASSIGN(NestedScopedBlockingCall); |
| 795 | }; |
| 796 | |
| 797 | } // namespace |
| 798 | |
// Fixture for tests that verify how a SchedulerWorkerPoolImpl adjusts its
// worker capacity when tasks enter ScopedBlockingCall scopes. Parameterized
// by the nested blocking scenario to exercise.
class TaskSchedulerWorkerPoolBlockingTest
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::TestWithParam<NestedBlockingType> {
 public:
  TaskSchedulerWorkerPoolBlockingTest()
      : blocking_thread_running_(WaitableEvent::ResetPolicy::AUTOMATIC,
                                 WaitableEvent::InitialState::NOT_SIGNALED),
        blocking_thread_continue_(WaitableEvent::ResetPolicy::MANUAL,
                                  WaitableEvent::InitialState::NOT_SIGNALED) {}

  // Builds a readable gtest parameter suffix from the NestedBlockingType,
  // e.g. "MAY_BLOCK_WILL_BLOCK" for a WILL_BLOCK nested inside a MAY_BLOCK.
  static std::string ParamInfoToString(
      ::testing::TestParamInfo<NestedBlockingType> param_info) {
    std::string str = param_info.param.first == BlockingType::MAY_BLOCK
                          ? "MAY_BLOCK"
                          : "WILL_BLOCK";
    if (param_info.param.second == OptionalBlockingType::MAY_BLOCK)
      str += "_MAY_BLOCK";
    else if (param_info.param.second == OptionalBlockingType::WILL_BLOCK)
      str += "_WILL_BLOCK";
    return str;
  }

  void SetUp() override {
    TaskSchedulerWorkerPoolImplTestBase::SetUp();
    task_runner_ =
        worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
  }

  void TearDown() override { TaskSchedulerWorkerPoolImplTestBase::TearDown(); }

 protected:
  // Saturates the worker pool with a task that first blocks, waits to be
  // unblocked, then exits.
  void SaturateWithBlockingTasks(
      const NestedBlockingType& nested_blocking_type) {
    // Signals |blocking_thread_running_| once all posted tasks are running.
    RepeatingClosure blocking_thread_running_closure =
        BarrierClosure(kNumWorkersInWorkerPool,
                       BindOnce(&WaitableEvent::Signal,
                                Unretained(&blocking_thread_running_)));

    for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
      task_runner_->PostTask(
          FROM_HERE,
          BindOnce(
              [](Closure* blocking_thread_running_closure,
                 WaitableEvent* blocking_thread_continue_,
                 const NestedBlockingType& nested_blocking_type) {
                NestedScopedBlockingCall nested_scoped_blocking_call(
                    nested_blocking_type);
                blocking_thread_running_closure->Run();

                {
                  // Use ScopedClearBlockingObserverForTesting to avoid
                  // affecting the worker capacity with this WaitableEvent.
                  internal::ScopedClearBlockingObserverForTesting
                      scoped_clear_blocking_observer;
                  blocking_thread_continue_->Wait();
                }

              },
              Unretained(&blocking_thread_running_closure),
              Unretained(&blocking_thread_continue_), nested_blocking_type));
    }
    blocking_thread_running_.Wait();
  }

  // Returns how long we can expect a change to |worker_capacity_| to occur
  // after a task has become blocked.
  TimeDelta GetWorkerCapacityChangeSleepTime() {
    return std::max(SchedulerWorkerPoolImpl::kBlockedWorkersPollPeriod,
                    worker_pool_->MayBlockThreshold()) +
           TestTimeouts::tiny_timeout();
  }

  // Waits up to some amount of time until |worker_pool_|'s worker capacity
  // reaches |expected_worker_capacity|.
  void ExpectWorkerCapacityAfterDelay(size_t expected_worker_capacity) {
    // Poll a bounded number of times rather than sleeping once; capacity
    // changes happen asynchronously on worker threads.
    constexpr int kMaxAttempts = 4;
    for (int i = 0;
         i < kMaxAttempts && worker_pool_->GetWorkerCapacityForTesting() !=
                                 expected_worker_capacity;
         ++i) {
      PlatformThread::Sleep(GetWorkerCapacityChangeSleepTime());
    }

    EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
              expected_worker_capacity);
  }

  // Unblocks tasks posted by SaturateWithBlockingTasks().
  void UnblockTasks() { blocking_thread_continue_.Signal(); }

  scoped_refptr<TaskRunner> task_runner_;

 private:
  // Signaled (via BarrierClosure) once all saturating tasks are running.
  WaitableEvent blocking_thread_running_;
  // Signaled by UnblockTasks() to release the saturating tasks.
  WaitableEvent blocking_thread_continue_;

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolBlockingTest);
};
| 899 | |
| 900 | // Verify that BlockingScopeEntered() causes worker capacity to increase and |
| 901 | // creates a worker if needed. Also verify that BlockingScopeExited() decreases |
| 902 | // worker capacity after an increase. |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 903 | TEST_P(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockedUnblocked) { |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 904 | ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 905 | kNumWorkersInWorkerPool); |
| 906 | |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 907 | SaturateWithBlockingTasks(GetParam()); |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 908 | if (GetParam().behaves_as == BlockingType::MAY_BLOCK) |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 909 | ExpectWorkerCapacityAfterDelay(2 * kNumWorkersInWorkerPool); |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 910 | // A range of possible number of workers is accepted because of |
| 911 | // crbug.com/757897. |
| 912 | EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(), |
| 913 | kNumWorkersInWorkerPool + 1); |
| 914 | EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), |
| 915 | 2 * kNumWorkersInWorkerPool); |
| 916 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 917 | 2 * kNumWorkersInWorkerPool); |
| 918 | |
| 919 | UnblockTasks(); |
| 920 | task_tracker_.Flush(); |
| 921 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 922 | kNumWorkersInWorkerPool); |
| 923 | } |
| 924 | |
| 925 | // Verify that tasks posted in a saturated pool before a ScopedBlockingCall will |
| 926 | // execute after ScopedBlockingCall is instantiated. |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 927 | TEST_P(TaskSchedulerWorkerPoolBlockingTest, PostBeforeBlocking) { |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 928 | WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC, |
| 929 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 930 | WaitableEvent thread_can_block(WaitableEvent::ResetPolicy::MANUAL, |
| 931 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 932 | WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL, |
| 933 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 934 | |
| 935 | for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
| 936 | task_runner_->PostTask( |
| 937 | FROM_HERE, |
| 938 | BindOnce( |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 939 | [](const NestedBlockingType& nested_blocking_type, |
| 940 | WaitableEvent* thread_running, WaitableEvent* thread_can_block, |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 941 | WaitableEvent* thread_continue) { |
| 942 | thread_running->Signal(); |
Francois Doray | e058b72 | 2017-09-12 04:53:29 +0900 | [diff] [blame^] | 943 | { |
| 944 | // Use ScopedClearBlockingObserverForTesting to avoid affecting |
| 945 | // the worker capacity with this WaitableEvent. |
| 946 | internal::ScopedClearBlockingObserverForTesting |
| 947 | scoped_clear_blocking_observer; |
| 948 | thread_can_block->Wait(); |
| 949 | } |
| 950 | |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 951 | NestedScopedBlockingCall nested_scoped_blocking_call( |
| 952 | nested_blocking_type); |
Francois Doray | e058b72 | 2017-09-12 04:53:29 +0900 | [diff] [blame^] | 953 | |
| 954 | { |
| 955 | // Use ScopedClearBlockingObserverForTesting to avoid affecting |
| 956 | // the worker capacity with this WaitableEvent. |
| 957 | internal::ScopedClearBlockingObserverForTesting |
| 958 | scoped_clear_blocking_observer; |
| 959 | thread_continue->Wait(); |
| 960 | } |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 961 | }, |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 962 | GetParam(), Unretained(&thread_running), |
| 963 | Unretained(&thread_can_block), Unretained(&thread_continue))); |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 964 | thread_running.Wait(); |
| 965 | } |
| 966 | |
| 967 | // All workers should be occupied and the pool should be saturated. Workers |
| 968 | // have not entered ScopedBlockingCall yet. |
| 969 | EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool); |
| 970 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 971 | kNumWorkersInWorkerPool); |
| 972 | |
| 973 | WaitableEvent extra_thread_running(WaitableEvent::ResetPolicy::MANUAL, |
| 974 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 975 | WaitableEvent extra_threads_continue( |
| 976 | WaitableEvent::ResetPolicy::MANUAL, |
| 977 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 978 | RepeatingClosure extra_threads_running_barrier = BarrierClosure( |
| 979 | kNumWorkersInWorkerPool, |
| 980 | BindOnce(&WaitableEvent::Signal, Unretained(&extra_thread_running))); |
| 981 | for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
| 982 | task_runner_->PostTask(FROM_HERE, |
| 983 | BindOnce( |
| 984 | [](Closure* extra_threads_running_barrier, |
| 985 | WaitableEvent* extra_threads_continue) { |
| 986 | extra_threads_running_barrier->Run(); |
Francois Doray | e058b72 | 2017-09-12 04:53:29 +0900 | [diff] [blame^] | 987 | { |
| 988 | // Use ScopedClearBlockingObserverForTesting |
| 989 | // to avoid affecting the worker capacity |
| 990 | // with this WaitableEvent. |
| 991 | internal:: |
| 992 | ScopedClearBlockingObserverForTesting |
| 993 | scoped_clear_blocking_observer; |
| 994 | extra_threads_continue->Wait(); |
| 995 | } |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 996 | }, |
| 997 | Unretained(&extra_threads_running_barrier), |
| 998 | Unretained(&extra_threads_continue))); |
| 999 | } |
| 1000 | |
| 1001 | // Allow tasks to enter ScopedBlockingCall. Workers should be created for the |
| 1002 | // tasks we just posted. |
| 1003 | thread_can_block.Signal(); |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1004 | if (GetParam().behaves_as == BlockingType::MAY_BLOCK) |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 1005 | ExpectWorkerCapacityAfterDelay(2 * kNumWorkersInWorkerPool); |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 1006 | |
| 1007 | // Should not block forever. |
| 1008 | extra_thread_running.Wait(); |
| 1009 | EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), |
| 1010 | 2 * kNumWorkersInWorkerPool); |
| 1011 | extra_threads_continue.Signal(); |
| 1012 | |
| 1013 | thread_continue.Signal(); |
| 1014 | task_tracker_.Flush(); |
| 1015 | } |
// Verify that workers become idle when the pool is over-capacity and that
// those workers do no work.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);

  // Block |kNumWorkersInWorkerPool| tasks inside ScopedBlockingCalls; this is
  // expected to double the worker capacity.
  SaturateWithBlockingTasks(GetParam());
  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
    ExpectWorkerCapacityAfterDelay(2 * kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            2 * kNumWorkersInWorkerPool);
  // A range of possible number of workers is accepted because of
  // crbug.com/757897.
  EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
            kNumWorkersInWorkerPool + 1);
  EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
            2 * kNumWorkersInWorkerPool);

  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
                                WaitableEvent::InitialState::NOT_SIGNALED);

  RepeatingClosure thread_running_barrier = BarrierClosure(
      kNumWorkersInWorkerPool,
      BindOnce(&WaitableEvent::Signal, Unretained(&thread_running)));
  // Posting these tasks should cause new workers to be created.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    auto callback = BindOnce(
        [](Closure* thread_running_barrier, WaitableEvent* thread_continue) {
          thread_running_barrier->Run();
          {
            // Use ScopedClearBlockingObserverForTesting to avoid affecting the
            // worker capacity with this WaitableEvent.
            internal::ScopedClearBlockingObserverForTesting
                scoped_clear_blocking_observer;
            thread_continue->Wait();
          }
        },
        Unretained(&thread_running_barrier), Unretained(&thread_continue));
    task_runner_->PostTask(FROM_HERE, std::move(callback));
  }
  thread_running.Wait();

  // All workers, original and newly created, should now be busy.
  ASSERT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(), 0U);
  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
            2 * kNumWorkersInWorkerPool);

  AtomicFlag is_exiting;
  // These tasks should not get executed until after other tasks become
  // unblocked.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_runner_->PostTask(FROM_HERE, BindOnce(
                                          [](AtomicFlag* is_exiting) {
                                            EXPECT_TRUE(is_exiting->IsSet());
                                          },
                                          Unretained(&is_exiting)));
  }

  // The original |kNumWorkersInWorkerPool| will finish their tasks after being
  // unblocked. There will be work in the work queue, but the pool should now
  // be over-capacity and workers will become idle.
  UnblockTasks();
  worker_pool_->WaitForWorkersIdleForTesting(kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(),
            kNumWorkersInWorkerPool);

  // Posting more tasks should not cause workers idle from the pool being over
  // capacity to begin doing work.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_runner_->PostTask(FROM_HERE, BindOnce(
                                          [](AtomicFlag* is_exiting) {
                                            EXPECT_TRUE(is_exiting->IsSet());
                                          },
                                          Unretained(&is_exiting)));
  }

  // Give time for those idle workers to possibly do work (which should not
  // happen).
  PlatformThread::Sleep(TestTimeouts::tiny_timeout());

  is_exiting.Set();
  // Unblocks the new workers.
  thread_continue.Signal();
  task_tracker_.Flush();
}
| 1102 | |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1103 | INSTANTIATE_TEST_CASE_P( |
| 1104 | , |
| 1105 | TaskSchedulerWorkerPoolBlockingTest, |
| 1106 | ::testing::Values(NestedBlockingType(BlockingType::MAY_BLOCK, |
| 1107 | OptionalBlockingType::NO_BLOCK, |
| 1108 | BlockingType::MAY_BLOCK), |
| 1109 | NestedBlockingType(BlockingType::WILL_BLOCK, |
| 1110 | OptionalBlockingType::NO_BLOCK, |
| 1111 | BlockingType::WILL_BLOCK), |
| 1112 | NestedBlockingType(BlockingType::MAY_BLOCK, |
| 1113 | OptionalBlockingType::WILL_BLOCK, |
| 1114 | BlockingType::WILL_BLOCK), |
| 1115 | NestedBlockingType(BlockingType::WILL_BLOCK, |
| 1116 | OptionalBlockingType::MAY_BLOCK, |
| 1117 | BlockingType::WILL_BLOCK)), |
| 1118 | TaskSchedulerWorkerPoolBlockingTest::ParamInfoToString); |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 1119 | |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1120 | // Verify that if a thread enters the scope of a MAY_BLOCK ScopedBlockingCall, |
| 1121 | // but exits the scope before the MayBlockThreshold() is reached, that the |
| 1122 | // worker capacity does not increase. |
| 1123 | TEST_F(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockUnblockPremature) { |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 1124 | ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1125 | kNumWorkersInWorkerPool); |
| 1126 | |
| 1127 | TimeDelta worker_capacity_change_sleep = GetWorkerCapacityChangeSleepTime(); |
| 1128 | worker_pool_->MaximizeMayBlockThresholdForTesting(); |
| 1129 | |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1130 | SaturateWithBlockingTasks(NestedBlockingType(BlockingType::MAY_BLOCK, |
| 1131 | OptionalBlockingType::NO_BLOCK, |
| 1132 | BlockingType::MAY_BLOCK)); |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 1133 | PlatformThread::Sleep(worker_capacity_change_sleep); |
| 1134 | EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool); |
| 1135 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1136 | kNumWorkersInWorkerPool); |
| 1137 | |
| 1138 | UnblockTasks(); |
| 1139 | task_tracker_.Flush(); |
| 1140 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1141 | kNumWorkersInWorkerPool); |
| 1142 | } |
| 1143 | |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1144 | // Verify that if worker capacity is incremented because of a MAY_BLOCK |
| 1145 | // ScopedBlockingCall, it isn't incremented again when there is a nested |
| 1146 | // WILL_BLOCK ScopedBlockingCall. |
| 1147 | TEST_F(TaskSchedulerWorkerPoolBlockingTest, |
| 1148 | MayBlockIncreaseCapacityNestedWillBlock) { |
| 1149 | ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1150 | kNumWorkersInWorkerPool); |
| 1151 | auto task_runner = |
| 1152 | worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}); |
| 1153 | WaitableEvent can_return(WaitableEvent::ResetPolicy::MANUAL, |
| 1154 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1155 | |
| 1156 | // Saturate the pool so that a MAY_BLOCK ScopedBlockingCall would increment |
| 1157 | // the worker capacity. |
| 1158 | for (size_t i = 0; i < kNumWorkersInWorkerPool - 1; ++i) { |
Francois Doray | e058b72 | 2017-09-12 04:53:29 +0900 | [diff] [blame^] | 1159 | task_runner->PostTask(FROM_HERE, |
| 1160 | BindOnce( |
| 1161 | [](WaitableEvent* can_return) { |
| 1162 | // Use ScopedClearBlockingObserverForTesting to |
| 1163 | // avoid affecting the worker capacity with this |
| 1164 | // WaitableEvent. |
| 1165 | internal::ScopedClearBlockingObserverForTesting |
| 1166 | scoped_clear_blocking_observer; |
| 1167 | can_return->Wait(); |
| 1168 | }, |
| 1169 | Unretained(&can_return))); |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1170 | } |
| 1171 | |
| 1172 | WaitableEvent can_instantiate_will_block( |
| 1173 | WaitableEvent::ResetPolicy::MANUAL, |
| 1174 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1175 | WaitableEvent did_instantiate_will_block( |
| 1176 | WaitableEvent::ResetPolicy::MANUAL, |
| 1177 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1178 | |
| 1179 | // Post a task that instantiates a MAY_BLOCK ScopedBlockingCall. |
| 1180 | task_runner->PostTask( |
| 1181 | FROM_HERE, |
| 1182 | BindOnce( |
| 1183 | [](WaitableEvent* can_instantiate_will_block, |
| 1184 | WaitableEvent* did_instantiate_will_block, |
| 1185 | WaitableEvent* can_return) { |
| 1186 | ScopedBlockingCall may_block(BlockingType::MAY_BLOCK); |
Francois Doray | e058b72 | 2017-09-12 04:53:29 +0900 | [diff] [blame^] | 1187 | { |
| 1188 | // Use ScopedClearBlockingObserverForTesting to avoid affecting |
| 1189 | // the worker capacity with this WaitableEvent. |
| 1190 | internal::ScopedClearBlockingObserverForTesting |
| 1191 | scoped_clear_blocking_observer; |
| 1192 | can_instantiate_will_block->Wait(); |
| 1193 | } |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1194 | ScopedBlockingCall will_block(BlockingType::WILL_BLOCK); |
| 1195 | did_instantiate_will_block->Signal(); |
Francois Doray | e058b72 | 2017-09-12 04:53:29 +0900 | [diff] [blame^] | 1196 | { |
| 1197 | // Use ScopedClearBlockingObserverForTesting to avoid affecting |
| 1198 | // the worker capacity with this WaitableEvent. |
| 1199 | internal::ScopedClearBlockingObserverForTesting |
| 1200 | scoped_clear_blocking_observer; |
| 1201 | can_return->Wait(); |
| 1202 | } |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1203 | }, |
| 1204 | Unretained(&can_instantiate_will_block), |
| 1205 | Unretained(&did_instantiate_will_block), Unretained(&can_return))); |
| 1206 | |
| 1207 | // After a short delay, worker capacity should be incremented. |
| 1208 | ExpectWorkerCapacityAfterDelay(kNumWorkersInWorkerPool + 1); |
| 1209 | |
| 1210 | // Wait until the task instantiates a WILL_BLOCK ScopedBlockingCall. |
| 1211 | can_instantiate_will_block.Signal(); |
| 1212 | did_instantiate_will_block.Wait(); |
| 1213 | |
| 1214 | // Worker capacity shouldn't be incremented again. |
| 1215 | EXPECT_EQ(kNumWorkersInWorkerPool + 1, |
| 1216 | worker_pool_->GetWorkerCapacityForTesting()); |
| 1217 | |
| 1218 | // Tear down. |
| 1219 | can_return.Signal(); |
| 1220 | task_tracker_.Flush(); |
| 1221 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1222 | kNumWorkersInWorkerPool); |
| 1223 | } |
| 1224 | |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 1225 | // Verify that workers that become idle due to the pool being over capacity will |
| 1226 | // eventually cleanup. |
| 1227 | TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) { |
| 1228 | constexpr size_t kWorkerCapacity = 3; |
| 1229 | |
| 1230 | TaskTracker task_tracker; |
| 1231 | DelayedTaskManager delayed_task_manager; |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 1232 | scoped_refptr<TaskRunner> service_thread_task_runner = |
| 1233 | MakeRefCounted<TestSimpleTaskRunner>(); |
| 1234 | delayed_task_manager.Start(service_thread_task_runner); |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 1235 | SchedulerWorkerPoolImpl worker_pool("OverWorkerCapacityTestWorkerPool", |
| 1236 | ThreadPriority::NORMAL, &task_tracker, |
| 1237 | &delayed_task_manager); |
| 1238 | worker_pool.Start( |
Francois Doray | b922375 | 2017-09-06 03:44:43 +0900 | [diff] [blame] | 1239 | SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests), |
| 1240 | service_thread_task_runner); |
Jeffrey He | b23ff4c | 2017-08-23 07:32:49 +0900 | [diff] [blame] | 1241 | |
| 1242 | scoped_refptr<TaskRunner> task_runner = |
| 1243 | worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}); |
| 1244 | |
| 1245 | WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC, |
| 1246 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1247 | WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL, |
| 1248 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1249 | RepeatingClosure thread_running_barrier = BarrierClosure( |
| 1250 | kWorkerCapacity, |
| 1251 | BindOnce(&WaitableEvent::Signal, Unretained(&thread_running))); |
| 1252 | |
| 1253 | WaitableEvent blocked_call_continue( |
| 1254 | WaitableEvent::ResetPolicy::MANUAL, |
| 1255 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1256 | |
| 1257 | RepeatingClosure closure = BindRepeating( |
| 1258 | [](Closure* thread_running_barrier, WaitableEvent* thread_continue, |
| 1259 | WaitableEvent* blocked_call_continue) { |
| 1260 | thread_running_barrier->Run(); |
| 1261 | { |
| 1262 | ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK); |
| 1263 | blocked_call_continue->Wait(); |
| 1264 | } |
| 1265 | thread_continue->Wait(); |
| 1266 | |
| 1267 | }, |
| 1268 | Unretained(&thread_running_barrier), Unretained(&thread_continue), |
| 1269 | Unretained(&blocked_call_continue)); |
| 1270 | |
| 1271 | for (size_t i = 0; i < kWorkerCapacity; ++i) |
| 1272 | task_runner->PostTask(FROM_HERE, closure); |
| 1273 | |
| 1274 | thread_running.Wait(); |
| 1275 | |
| 1276 | WaitableEvent extra_threads_running( |
| 1277 | WaitableEvent::ResetPolicy::AUTOMATIC, |
| 1278 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1279 | WaitableEvent extra_threads_continue( |
| 1280 | WaitableEvent::ResetPolicy::MANUAL, |
| 1281 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1282 | |
| 1283 | RepeatingClosure extra_threads_running_barrier = BarrierClosure( |
| 1284 | kWorkerCapacity, |
| 1285 | BindOnce(&WaitableEvent::Signal, Unretained(&extra_threads_running))); |
| 1286 | // These tasks should run on the new threads from increasing worker capacity. |
| 1287 | for (size_t i = 0; i < kWorkerCapacity; ++i) { |
| 1288 | task_runner->PostTask(FROM_HERE, |
| 1289 | BindOnce( |
| 1290 | [](Closure* extra_threads_running_barrier, |
| 1291 | WaitableEvent* extra_threads_continue) { |
| 1292 | extra_threads_running_barrier->Run(); |
| 1293 | extra_threads_continue->Wait(); |
| 1294 | }, |
| 1295 | Unretained(&extra_threads_running_barrier), |
| 1296 | Unretained(&extra_threads_continue))); |
| 1297 | } |
| 1298 | extra_threads_running.Wait(); |
| 1299 | |
| 1300 | ASSERT_EQ(kWorkerCapacity * 2, worker_pool.NumberOfWorkersForTesting()); |
| 1301 | EXPECT_EQ(kWorkerCapacity * 2, worker_pool.GetWorkerCapacityForTesting()); |
| 1302 | blocked_call_continue.Signal(); |
| 1303 | extra_threads_continue.Signal(); |
| 1304 | |
| 1305 | TimeTicks before_cleanup_start = TimeTicks::Now(); |
| 1306 | while (TimeTicks::Now() - before_cleanup_start < |
| 1307 | kReclaimTimeForCleanupTests + kExtraTimeToWaitForCleanup) { |
| 1308 | if (worker_pool.NumberOfWorkersForTesting() <= kWorkerCapacity + 1) |
| 1309 | break; |
| 1310 | |
| 1311 | // Periodically post tasks to ensure that posting tasks does not prevent |
| 1312 | // workers that are idle due to the pool being over capacity from cleaning |
| 1313 | // up. |
| 1314 | task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing)); |
| 1315 | PlatformThread::Sleep(kReclaimTimeForCleanupTests / 2); |
| 1316 | } |
| 1317 | // Note: one worker above capacity will not get cleaned up since it's on the |
| 1318 | // top of the idle stack. |
| 1319 | EXPECT_EQ(kWorkerCapacity + 1, worker_pool.NumberOfWorkersForTesting()); |
| 1320 | |
| 1321 | thread_continue.Signal(); |
| 1322 | |
| 1323 | worker_pool.DisallowWorkerCleanupForTesting(); |
| 1324 | worker_pool.JoinForTesting(); |
| 1325 | } |
| 1326 | |
Jeffrey He | 4426474 | 2017-08-27 11:38:05 +0900 | [diff] [blame] | 1327 | // Verify that the maximum number of workers is 256 and that hitting the max |
| 1328 | // leaves the pool in a valid state with regards to worker capacity. |
Francois Doray | 7c49b87 | 2017-09-12 02:27:50 +0900 | [diff] [blame] | 1329 | TEST_F(TaskSchedulerWorkerPoolBlockingTest, MaximumWorkersTest) { |
Jeffrey He | 4426474 | 2017-08-27 11:38:05 +0900 | [diff] [blame] | 1330 | constexpr size_t kMaxNumberOfWorkers = 256; |
| 1331 | constexpr size_t kNumExtraTasks = 10; |
| 1332 | |
| 1333 | WaitableEvent early_blocking_thread_running( |
| 1334 | WaitableEvent::ResetPolicy::MANUAL, |
| 1335 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1336 | RepeatingClosure early_threads_barrier_closure = |
| 1337 | BarrierClosure(kMaxNumberOfWorkers, |
| 1338 | BindOnce(&WaitableEvent::Signal, |
| 1339 | Unretained(&early_blocking_thread_running))); |
| 1340 | |
| 1341 | WaitableEvent early_threads_finished( |
| 1342 | WaitableEvent::ResetPolicy::MANUAL, |
| 1343 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1344 | RepeatingClosure early_threads_finished_barrier = BarrierClosure( |
| 1345 | kMaxNumberOfWorkers, |
| 1346 | BindOnce(&WaitableEvent::Signal, Unretained(&early_threads_finished))); |
| 1347 | |
| 1348 | WaitableEvent early_release_thread_continue( |
| 1349 | WaitableEvent::ResetPolicy::MANUAL, |
| 1350 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1351 | |
| 1352 | // Post ScopedBlockingCall tasks to hit the worker cap. |
| 1353 | for (size_t i = 0; i < kMaxNumberOfWorkers; ++i) { |
| 1354 | task_runner_->PostTask(FROM_HERE, |
| 1355 | BindOnce( |
| 1356 | [](Closure* early_threads_barrier_closure, |
| 1357 | WaitableEvent* early_release_thread_continue, |
| 1358 | Closure* early_threads_finished) { |
| 1359 | { |
| 1360 | ScopedBlockingCall scoped_blocking_call( |
| 1361 | BlockingType::WILL_BLOCK); |
| 1362 | early_threads_barrier_closure->Run(); |
| 1363 | early_release_thread_continue->Wait(); |
| 1364 | } |
| 1365 | early_threads_finished->Run(); |
| 1366 | }, |
| 1367 | Unretained(&early_threads_barrier_closure), |
| 1368 | Unretained(&early_release_thread_continue), |
| 1369 | Unretained(&early_threads_finished_barrier))); |
| 1370 | } |
| 1371 | |
| 1372 | early_blocking_thread_running.Wait(); |
| 1373 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1374 | kNumWorkersInWorkerPool + kMaxNumberOfWorkers); |
| 1375 | |
| 1376 | WaitableEvent late_release_thread_contine( |
| 1377 | WaitableEvent::ResetPolicy::MANUAL, |
| 1378 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1379 | |
| 1380 | WaitableEvent late_blocking_thread_running( |
| 1381 | WaitableEvent::ResetPolicy::MANUAL, |
| 1382 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1383 | RepeatingClosure late_threads_barrier_closure = BarrierClosure( |
| 1384 | kNumExtraTasks, BindOnce(&WaitableEvent::Signal, |
| 1385 | Unretained(&late_blocking_thread_running))); |
| 1386 | |
| 1387 | // Posts additional tasks. Note: we should already have |kMaxNumberOfWorkers| |
| 1388 | // tasks running. These tasks should not be able to get executed yet as |
| 1389 | // the pool is already at its max worker cap. |
| 1390 | for (size_t i = 0; i < kNumExtraTasks; ++i) { |
| 1391 | task_runner_->PostTask( |
| 1392 | FROM_HERE, |
| 1393 | BindOnce( |
| 1394 | [](Closure* late_threads_barrier_closure, |
| 1395 | WaitableEvent* late_release_thread_contine) { |
| 1396 | ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK); |
| 1397 | late_threads_barrier_closure->Run(); |
| 1398 | late_release_thread_contine->Wait(); |
| 1399 | }, |
| 1400 | Unretained(&late_threads_barrier_closure), |
| 1401 | Unretained(&late_release_thread_contine))); |
| 1402 | } |
| 1403 | |
| 1404 | // Give time to see if we exceed the max number of workers. |
| 1405 | PlatformThread::Sleep(TestTimeouts::tiny_timeout()); |
| 1406 | EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), kMaxNumberOfWorkers); |
| 1407 | |
| 1408 | early_release_thread_continue.Signal(); |
| 1409 | early_threads_finished.Wait(); |
| 1410 | late_blocking_thread_running.Wait(); |
| 1411 | |
| 1412 | WaitableEvent final_tasks_running(WaitableEvent::ResetPolicy::MANUAL, |
| 1413 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1414 | WaitableEvent final_tasks_continue(WaitableEvent::ResetPolicy::MANUAL, |
| 1415 | WaitableEvent::InitialState::NOT_SIGNALED); |
| 1416 | RepeatingClosure final_tasks_running_barrier = BarrierClosure( |
| 1417 | kNumWorkersInWorkerPool, |
| 1418 | BindOnce(&WaitableEvent::Signal, Unretained(&final_tasks_running))); |
| 1419 | |
| 1420 | // Verify that we are still able to saturate the pool. |
| 1421 | for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
| 1422 | task_runner_->PostTask( |
| 1423 | FROM_HERE, |
| 1424 | BindOnce( |
| 1425 | [](Closure* closure, WaitableEvent* final_tasks_continue) { |
| 1426 | closure->Run(); |
| 1427 | final_tasks_continue->Wait(); |
| 1428 | }, |
| 1429 | Unretained(&final_tasks_running_barrier), |
| 1430 | Unretained(&final_tasks_continue))); |
| 1431 | } |
| 1432 | final_tasks_running.Wait(); |
| 1433 | EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(), |
| 1434 | kNumWorkersInWorkerPool + kNumExtraTasks); |
| 1435 | late_release_thread_contine.Signal(); |
| 1436 | final_tasks_continue.Signal(); |
| 1437 | task_tracker_.Flush(); |
| 1438 | } |
| 1439 | |
fdoray | a2d271b | 2016-04-15 23:09:08 +0900 | [diff] [blame] | 1440 | } // namespace internal |
| 1441 | } // namespace base |