blob: 117d6e8d391325bc02c29561347edbac4359ceda [file] [log] [blame]
fdoraya2d271b2016-04-15 23:09:08 +09001// Copyright 2016 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
robliao7ac34ba2016-06-23 03:16:25 +09005#include "base/task_scheduler/scheduler_worker_pool_impl.h"
fdoraya2d271b2016-04-15 23:09:08 +09006
7#include <stddef.h>
8
9#include <memory>
10#include <unordered_set>
11#include <vector>
12
robliao2e951752016-07-23 03:12:18 +090013#include "base/atomicops.h"
Jeffrey Heb23ff4c2017-08-23 07:32:49 +090014#include "base/barrier_closure.h"
fdoraya2d271b2016-04-15 23:09:08 +090015#include "base/bind.h"
16#include "base/bind_helpers.h"
fdoraydace22d2016-04-29 04:35:47 +090017#include "base/callback.h"
fdoraya2d271b2016-04-15 23:09:08 +090018#include "base/macros.h"
19#include "base/memory/ptr_util.h"
20#include "base/memory/ref_counted.h"
fdoray4b836782016-09-28 05:44:25 +090021#include "base/metrics/histogram.h"
22#include "base/metrics/histogram_samples.h"
23#include "base/metrics/statistics_recorder.h"
fdoraya2d271b2016-04-15 23:09:08 +090024#include "base/synchronization/condition_variable.h"
25#include "base/synchronization/lock.h"
26#include "base/synchronization/waitable_event.h"
27#include "base/task_runner.h"
fdorayc2c74992016-04-20 10:39:21 +090028#include "base/task_scheduler/delayed_task_manager.h"
robliaodf2e1542016-07-21 06:46:52 +090029#include "base/task_scheduler/scheduler_worker_pool_params.h"
fdoraya2d271b2016-04-15 23:09:08 +090030#include "base/task_scheduler/sequence.h"
31#include "base/task_scheduler/sequence_sort_key.h"
32#include "base/task_scheduler/task_tracker.h"
fdoray570633b2016-04-26 01:24:46 +090033#include "base/task_scheduler/test_task_factory.h"
fdoray9c56ea32016-11-02 23:35:26 +090034#include "base/task_scheduler/test_utils.h"
gabbcf9c762016-08-02 01:39:56 +090035#include "base/test/gtest_util.h"
fdoraya6600912016-10-15 06:40:37 +090036#include "base/test/test_simple_task_runner.h"
37#include "base/test/test_timeouts.h"
fdoraya2d271b2016-04-15 23:09:08 +090038#include "base/threading/platform_thread.h"
Jeffrey Heb23ff4c2017-08-23 07:32:49 +090039#include "base/threading/scoped_blocking_call.h"
fdoraya2d271b2016-04-15 23:09:08 +090040#include "base/threading/simple_thread.h"
fdoraya6600912016-10-15 06:40:37 +090041#include "base/threading/thread.h"
robliao2e951752016-07-23 03:12:18 +090042#include "base/threading/thread_checker_impl.h"
43#include "base/threading/thread_local_storage.h"
fdoray42925262016-04-29 06:36:33 +090044#include "base/threading/thread_restrictions.h"
robliao2e951752016-07-23 03:12:18 +090045#include "base/time/time.h"
Robert Liao4eaab522017-11-02 05:06:03 +090046#include "build/build_config.h"
fdoraya2d271b2016-04-15 23:09:08 +090047#include "testing/gtest/include/gtest/gtest.h"
48
Robert Liao4eaab522017-11-02 05:06:03 +090049#if defined(OS_WIN)
50#include "base/win/com_init_util.h"
51#endif // defined(OS_WIN)
52
fdoraya2d271b2016-04-15 23:09:08 +090053namespace base {
54namespace internal {
55namespace {
56
// Size of the worker pools created by the fixtures below.
constexpr size_t kNumWorkersInWorkerPool = 4;
// Number of threads that concurrently post tasks in multi-threaded tests.
constexpr size_t kNumThreadsPostingTasks = 4;
// Number of tasks posted by each posting thread / short-task factory.
constexpr size_t kNumTasksPostedPerThread = 150;
// This can't be lower because Windows' WaitableEvent wakes up too early when a
// small timeout is used. This results in many spurious wake ups before a worker
// is allowed to cleanup.
constexpr TimeDelta kReclaimTimeForCleanupTests =
    TimeDelta::FromMilliseconds(500);
// Extra margin added on top of the reclaim time when sleeping to give workers
// a chance to actually clean up.
constexpr TimeDelta kExtraTimeToWaitForCleanup = TimeDelta::FromSeconds(1);
fdoraya2d271b2016-04-15 23:09:08 +090066
// Shared harness owning a SchedulerWorkerPoolImpl together with the
// TaskTracker, service thread and DelayedTaskManager it depends on. Not a
// gtest fixture by itself: concrete fixtures derive from this and from a
// testing::Test flavor and forward SetUp()/TearDown() to
// CommonSetUp()/CommonTearDown().
class TaskSchedulerWorkerPoolImplTestBase {
 protected:
  TaskSchedulerWorkerPoolImplTestBase()
      : service_thread_("TaskSchedulerServiceThread"){};

  // Creates and starts a pool with |kNumWorkersInWorkerPool| workers and no
  // worker reclaim (TimeDelta::Max()).
  void CommonSetUp() {
    CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
  }

  // Tears down in a safe order: stop the service thread, flush outstanding
  // tasks, wait for workers to go idle, then join them.
  void CommonTearDown() {
    service_thread_.Stop();
    task_tracker_.FlushForTesting();
    worker_pool_->WaitForAllWorkersIdleForTesting();
    worker_pool_->JoinForTesting();
  }

  // Creates the pool without starting it. Tasks may be posted to it, but do
  // not run until StartWorkerPool() is called.
  void CreateWorkerPool() {
    ASSERT_FALSE(worker_pool_);
    service_thread_.Start();
    delayed_task_manager_.Start(service_thread_.task_runner());
    worker_pool_ = std::make_unique<SchedulerWorkerPoolImpl>(
        "TestWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker_,
        &delayed_task_manager_);
    ASSERT_TRUE(worker_pool_);
  }

  // Starts the previously created pool. Virtual so subclasses can start with
  // a different WorkerEnvironment (e.g. COM MTA on Windows).
  virtual void StartWorkerPool(TimeDelta suggested_reclaim_time,
                               size_t num_workers) {
    ASSERT_TRUE(worker_pool_);
    worker_pool_->Start(
        SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
        service_thread_.task_runner(),
        SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
  }

  void CreateAndStartWorkerPool(TimeDelta suggested_reclaim_time,
                                size_t num_workers) {
    CreateWorkerPool();
    StartWorkerPool(suggested_reclaim_time, num_workers);
  }

  std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_;

  TaskTracker task_tracker_ = {"Test"};
  Thread service_thread_;

 private:
  DelayedTaskManager delayed_task_manager_;

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestBase);
};
118
// Non-parameterized fixture using the default pool configuration.
class TaskSchedulerWorkerPoolImplTest
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::Test {
 protected:
  TaskSchedulerWorkerPoolImplTest() = default;

  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }

  void TearDown() override {
    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTest);
};
134
// Fixture parameterized on test::ExecutionMode (PARALLEL or SEQUENCED); see
// the INSTANTIATE_TEST_CASE_P calls below.
class TaskSchedulerWorkerPoolImplTestParam
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::TestWithParam<test::ExecutionMode> {
 protected:
  TaskSchedulerWorkerPoolImplTestParam() = default;

  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }

  void TearDown() override {
    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestParam);
};
150
fdoray570633b2016-04-26 01:24:46 +0900151using PostNestedTask = test::TestTaskFactory::PostNestedTask;
fdoraya2d271b2016-04-15 23:09:08 +0900152
Jeffrey He68d29bc2017-08-24 02:14:16 +0900153class ThreadPostingTasksWaitIdle : public SimpleThread {
fdoraya2d271b2016-04-15 23:09:08 +0900154 public:
robliao7ac34ba2016-06-23 03:16:25 +0900155 // Constructs a thread that posts tasks to |worker_pool| through an
Jeffrey He68d29bc2017-08-24 02:14:16 +0900156 // |execution_mode| task runner. The thread waits until all workers in
157 // |worker_pool| are idle before posting a new task.
158 ThreadPostingTasksWaitIdle(SchedulerWorkerPoolImpl* worker_pool,
159 test::ExecutionMode execution_mode)
160 : SimpleThread("ThreadPostingTasksWaitIdle"),
robliao7ac34ba2016-06-23 03:16:25 +0900161 worker_pool_(worker_pool),
fdoray9c56ea32016-11-02 23:35:26 +0900162 factory_(CreateTaskRunnerWithExecutionMode(worker_pool, execution_mode),
fdoray570633b2016-04-26 01:24:46 +0900163 execution_mode) {
robliao7ac34ba2016-06-23 03:16:25 +0900164 DCHECK(worker_pool_);
fdoraya2d271b2016-04-15 23:09:08 +0900165 }
166
fdoray570633b2016-04-26 01:24:46 +0900167 const test::TestTaskFactory* factory() const { return &factory_; }
fdoraya2d271b2016-04-15 23:09:08 +0900168
169 private:
170 void Run() override {
Yeol0d4f3eb2017-07-26 02:09:10 +0900171 EXPECT_FALSE(factory_.task_runner()->RunsTasksInCurrentSequence());
fdoraya2d271b2016-04-15 23:09:08 +0900172
173 for (size_t i = 0; i < kNumTasksPostedPerThread; ++i) {
Jeffrey He68d29bc2017-08-24 02:14:16 +0900174 worker_pool_->WaitForAllWorkersIdleForTesting();
175 EXPECT_TRUE(factory_.PostTask(PostNestedTask::NO, Closure()));
fdoraya2d271b2016-04-15 23:09:08 +0900176 }
177 }
178
robliao7ac34ba2016-06-23 03:16:25 +0900179 SchedulerWorkerPoolImpl* const worker_pool_;
fdoraya2d271b2016-04-15 23:09:08 +0900180 const scoped_refptr<TaskRunner> task_runner_;
fdoray570633b2016-04-26 01:24:46 +0900181 test::TestTaskFactory factory_;
fdoraya2d271b2016-04-15 23:09:08 +0900182
Jeffrey He68d29bc2017-08-24 02:14:16 +0900183 DISALLOW_COPY_AND_ASSIGN(ThreadPostingTasksWaitIdle);
fdoraya2d271b2016-04-15 23:09:08 +0900184};
185
fdoray9b0b2332016-04-26 06:34:33 +0900186} // namespace
187
Francois Dorayb9223752017-09-06 03:44:43 +0900188TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWaitAllWorkersIdle) {
robliaocf19f742016-06-23 03:36:41 +0900189 // Create threads to post tasks. To verify that workers can sleep and be woken
190 // up when new tasks are posted, wait for all workers to become idle before
191 // posting a new task.
Jeffrey He68d29bc2017-08-24 02:14:16 +0900192 std::vector<std::unique_ptr<ThreadPostingTasksWaitIdle>>
193 threads_posting_tasks;
fdoraya2d271b2016-04-15 23:09:08 +0900194 for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
Jeffrey He68d29bc2017-08-24 02:14:16 +0900195 threads_posting_tasks.push_back(
196 MakeUnique<ThreadPostingTasksWaitIdle>(worker_pool_.get(), GetParam()));
fdoraya2d271b2016-04-15 23:09:08 +0900197 threads_posting_tasks.back()->Start();
198 }
199
200 // Wait for all tasks to run.
201 for (const auto& thread_posting_tasks : threads_posting_tasks) {
202 thread_posting_tasks->Join();
203 thread_posting_tasks->factory()->WaitForAllTasksToRun();
fdoraya2d271b2016-04-15 23:09:08 +0900204 }
205
robliaocf19f742016-06-23 03:36:41 +0900206 // Wait until all workers are idle to be sure that no task accesses its
207 // TestTaskFactory after |thread_posting_tasks| is destroyed.
208 worker_pool_->WaitForAllWorkersIdleForTesting();
fdoraya2d271b2016-04-15 23:09:08 +0900209}
210
Francois Dorayb9223752017-09-06 03:44:43 +0900211TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWithOneAvailableWorker) {
robliaocf19f742016-06-23 03:36:41 +0900212 // Post blocking tasks to keep all workers busy except one until |event| is
fdoraydace22d2016-04-29 04:35:47 +0900213 // signaled. Use different factories so that tasks are added to different
214 // sequences and can run simultaneously when the execution mode is SEQUENCED.
gab56162332016-06-02 06:15:33 +0900215 WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
216 WaitableEvent::InitialState::NOT_SIGNALED);
fdoray570633b2016-04-26 01:24:46 +0900217 std::vector<std::unique_ptr<test::TestTaskFactory>> blocked_task_factories;
robliao7ac34ba2016-06-23 03:16:25 +0900218 for (size_t i = 0; i < (kNumWorkersInWorkerPool - 1); ++i) {
Jeremy Romancd0c4672017-08-17 08:27:24 +0900219 blocked_task_factories.push_back(std::make_unique<test::TestTaskFactory>(
fdoray9c56ea32016-11-02 23:35:26 +0900220 CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
ricead95e71b2016-09-13 13:10:11 +0900221 GetParam()));
fdoraydace22d2016-04-29 04:35:47 +0900222 EXPECT_TRUE(blocked_task_factories.back()->PostTask(
223 PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
fdoray82243232016-04-16 08:25:15 +0900224 blocked_task_factories.back()->WaitForAllTasksToRun();
225 }
fdoraya2d271b2016-04-15 23:09:08 +0900226
227 // Post |kNumTasksPostedPerThread| tasks that should all run despite the fact
robliaocf19f742016-06-23 03:36:41 +0900228 // that only one worker in |worker_pool_| isn't busy.
fdoray570633b2016-04-26 01:24:46 +0900229 test::TestTaskFactory short_task_factory(
fdoray9c56ea32016-11-02 23:35:26 +0900230 CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
fdoray570633b2016-04-26 01:24:46 +0900231 GetParam());
fdoraya2d271b2016-04-15 23:09:08 +0900232 for (size_t i = 0; i < kNumTasksPostedPerThread; ++i)
fdoraydace22d2016-04-29 04:35:47 +0900233 EXPECT_TRUE(short_task_factory.PostTask(PostNestedTask::NO, Closure()));
fdoray82243232016-04-16 08:25:15 +0900234 short_task_factory.WaitForAllTasksToRun();
fdoraya2d271b2016-04-15 23:09:08 +0900235
236 // Release tasks waiting on |event|.
237 event.Signal();
238
robliaocf19f742016-06-23 03:36:41 +0900239 // Wait until all workers are idle to be sure that no task accesses
fdoray570633b2016-04-26 01:24:46 +0900240 // its TestTaskFactory after it is destroyed.
robliaocf19f742016-06-23 03:36:41 +0900241 worker_pool_->WaitForAllWorkersIdleForTesting();
fdoraya2d271b2016-04-15 23:09:08 +0900242}
243
Francois Dorayb9223752017-09-06 03:44:43 +0900244TEST_P(TaskSchedulerWorkerPoolImplTestParam, Saturate) {
robliao7ac34ba2016-06-23 03:16:25 +0900245 // Verify that it is possible to have |kNumWorkersInWorkerPool|
fdoraydace22d2016-04-29 04:35:47 +0900246 // tasks/sequences running simultaneously. Use different factories so that the
247 // blocking tasks are added to different sequences and can run simultaneously
248 // when the execution mode is SEQUENCED.
gab56162332016-06-02 06:15:33 +0900249 WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
250 WaitableEvent::InitialState::NOT_SIGNALED);
fdoray570633b2016-04-26 01:24:46 +0900251 std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
robliao7ac34ba2016-06-23 03:16:25 +0900252 for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
Jeremy Romancd0c4672017-08-17 08:27:24 +0900253 factories.push_back(std::make_unique<test::TestTaskFactory>(
fdoray9c56ea32016-11-02 23:35:26 +0900254 CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
ricead95e71b2016-09-13 13:10:11 +0900255 GetParam()));
fdoraydace22d2016-04-29 04:35:47 +0900256 EXPECT_TRUE(factories.back()->PostTask(
257 PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
fdoray82243232016-04-16 08:25:15 +0900258 factories.back()->WaitForAllTasksToRun();
259 }
fdoraya2d271b2016-04-15 23:09:08 +0900260
261 // Release tasks waiting on |event|.
262 event.Signal();
263
robliaocf19f742016-06-23 03:36:41 +0900264 // Wait until all workers are idle to be sure that no task accesses
fdoray570633b2016-04-26 01:24:46 +0900265 // its TestTaskFactory after it is destroyed.
robliaocf19f742016-06-23 03:36:41 +0900266 worker_pool_->WaitForAllWorkersIdleForTesting();
fdoraya2d271b2016-04-15 23:09:08 +0900267}
268
#if defined(OS_WIN)
TEST_P(TaskSchedulerWorkerPoolImplTestParam, NoEnvironment) {
  // Verify that COM is not initialized in a SchedulerWorkerPoolImpl initialized
  // with SchedulerWorkerPoolImpl::WorkerEnvironment::NONE.
  scoped_refptr<TaskRunner> task_runner =
      CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());

  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
                             WaitableEvent::InitialState::NOT_SIGNALED);
  // The task asserts the COM apartment type from a worker thread, then signals
  // |task_running| so the test can synchronize on its completion.
  task_runner->PostTask(
      FROM_HERE, BindOnce(
                     [](WaitableEvent* task_running) {
                       win::AssertComApartmentType(win::ComApartmentType::NONE);
                       task_running->Signal();
                     },
                     &task_running));

  task_running.Wait();

  worker_pool_->WaitForAllWorkersIdleForTesting();
}
#endif  // defined(OS_WIN)
291
// Instantiate the parameterized fixture for both execution modes.
INSTANTIATE_TEST_CASE_P(Parallel,
                        TaskSchedulerWorkerPoolImplTestParam,
                        ::testing::Values(test::ExecutionMode::PARALLEL));
INSTANTIATE_TEST_CASE_P(Sequenced,
                        TaskSchedulerWorkerPoolImplTestParam,
                        ::testing::Values(test::ExecutionMode::SEQUENCED));
robliao2e951752016-07-23 03:12:18 +0900298
Robert Liao4eaab522017-11-02 05:06:03 +0900299#if defined(OS_WIN)
300
301namespace {
302
// Same as TaskSchedulerWorkerPoolImplTestParam, but the pool is started with
// a COM MTA environment on each worker (Windows only).
class TaskSchedulerWorkerPoolImplTestCOMMTAParam
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::TestWithParam<test::ExecutionMode> {
 protected:
  TaskSchedulerWorkerPoolImplTestCOMMTAParam() = default;

  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }

  void TearDown() override {
    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
  }

 private:
  // Overrides the base implementation to start the pool with
  // WorkerEnvironment::COM_MTA instead of NONE.
  void StartWorkerPool(TimeDelta suggested_reclaim_time,
                       size_t num_workers) override {
    ASSERT_TRUE(worker_pool_);
    worker_pool_->Start(
        SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
        service_thread_.task_runner(),
        SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA);
  }

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestCOMMTAParam);
};
327
328} // namespace
329
330TEST_P(TaskSchedulerWorkerPoolImplTestCOMMTAParam, COMMTAInitialized) {
331 // Verify that SchedulerWorkerPoolImpl workers have a COM MTA available.
332 scoped_refptr<TaskRunner> task_runner =
333 CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
334
335 WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
336 WaitableEvent::InitialState::NOT_SIGNALED);
337 task_runner->PostTask(
338 FROM_HERE, BindOnce(
339 [](WaitableEvent* task_running) {
340 win::AssertComApartmentType(win::ComApartmentType::MTA);
341 task_running->Signal();
342 },
343 &task_running));
344
345 task_running.Wait();
346
347 worker_pool_->WaitForAllWorkersIdleForTesting();
348}
349
// Instantiate the COM MTA fixture for both execution modes.
INSTANTIATE_TEST_CASE_P(Parallel,
                        TaskSchedulerWorkerPoolImplTestCOMMTAParam,
                        ::testing::Values(test::ExecutionMode::PARALLEL));
INSTANTIATE_TEST_CASE_P(Sequenced,
                        TaskSchedulerWorkerPoolImplTestCOMMTAParam,
                        ::testing::Values(test::ExecutionMode::SEQUENCED));
356
357#endif // defined(OS_WIN)
358
robliao2e951752016-07-23 03:12:18 +0900359namespace {
360
// Fixture that creates — but does not start — the worker pool, so individual
// tests can post tasks first and call StartWorkerPool() themselves.
class TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest
    : public TaskSchedulerWorkerPoolImplTest {
 public:
  void SetUp() override {
    CreateWorkerPool();
    // Let the test start the worker pool.
  }
};
369
// Records the ref of the thread this task runs on in |platform_thread_ref|,
// signals |task_running|, then blocks on |barrier| until the test releases it.
void TaskPostedBeforeStart(PlatformThreadRef* platform_thread_ref,
                           WaitableEvent* task_running,
                           WaitableEvent* barrier) {
  *platform_thread_ref = PlatformThread::CurrentRef();
  task_running->Signal();
  barrier->Wait();
}
377
378} // namespace
379
// Verify that 2 tasks posted before Start() to a SchedulerWorkerPoolImpl with
// more than 2 workers run on different workers when Start() is called.
TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
       PostTasksBeforeStart) {
  PlatformThreadRef task_1_thread_ref;
  PlatformThreadRef task_2_thread_ref;
  WaitableEvent task_1_running(WaitableEvent::ResetPolicy::MANUAL,
                               WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent task_2_running(WaitableEvent::ResetPolicy::MANUAL,
                               WaitableEvent::InitialState::NOT_SIGNALED);

  // This event is used to prevent a task from completing before the other task
  // starts running. If that happened, both tasks could run on the same worker
  // and this test couldn't verify that the correct number of workers were woken
  // up.
  WaitableEvent barrier(WaitableEvent::ResetPolicy::MANUAL,
                        WaitableEvent::InitialState::NOT_SIGNALED);

  worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
      ->PostTask(
          FROM_HERE,
          BindOnce(&TaskPostedBeforeStart, Unretained(&task_1_thread_ref),
                   Unretained(&task_1_running), Unretained(&barrier)));
  worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
      ->PostTask(
          FROM_HERE,
          BindOnce(&TaskPostedBeforeStart, Unretained(&task_2_thread_ref),
                   Unretained(&task_2_running), Unretained(&barrier)));

  // Workers should not be created and tasks should not run before the pool is
  // started.
  EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());
  EXPECT_FALSE(task_1_running.IsSignaled());
  EXPECT_FALSE(task_2_running.IsSignaled());

  StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);

  // Tasks should run shortly after the pool is started.
  task_1_running.Wait();
  task_2_running.Wait();

  // Tasks should run on different threads.
  EXPECT_NE(task_1_thread_ref, task_2_thread_ref);

  // Unblock both tasks and drain them before teardown.
  barrier.Signal();
  task_tracker_.FlushForTesting();
}
427
// Verify that posting many tasks before Start will cause the number of workers
// to grow to |worker_capacity_| during Start.
TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest, PostManyTasks) {
  scoped_refptr<TaskRunner> task_runner =
      worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
  // Post twice as many tasks as the pool's eventual worker capacity.
  constexpr size_t kNumTasksPosted = 2 * kNumWorkersInWorkerPool;
  for (size_t i = 0; i < kNumTasksPosted; ++i)
    task_runner->PostTask(FROM_HERE, DoNothing());

  // No workers may exist before the pool is started.
  EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());

  StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
  // Sanity check: more tasks were posted than capacity, so worker growth is
  // bounded by capacity rather than by the number of pending tasks.
  ASSERT_GT(kNumTasksPosted, worker_pool_->GetWorkerCapacityForTesting());
  EXPECT_EQ(kNumWorkersInWorkerPool,
            worker_pool_->GetWorkerCapacityForTesting());

  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
            worker_pool_->GetWorkerCapacityForTesting());
}
447
fdoray2890d2f2017-04-08 09:51:58 +0900448namespace {
449
// Sentinel value stored in each worker thread's TLS slot to detect whether a
// thread observed later is one of the original workers.
constexpr size_t kMagicTlsValue = 42;

// Fixture whose pool reclaims workers after |kReclaimTimeForCleanupTests|;
// tasks tag worker threads via TLS so cleanup can be detected.
class TaskSchedulerWorkerPoolCheckTlsReuse
    : public TaskSchedulerWorkerPoolImplTest {
 public:
  // Task body: tags the current worker thread's TLS slot, then blocks on
  // |waiter_| until the test releases it.
  void SetTlsValueAndWait() {
    slot_.Set(reinterpret_cast<void*>(kMagicTlsValue));
    waiter_.Wait();
  }

  // Task body: increments |zero_tls_values_| if this thread's TLS slot was
  // never tagged (i.e. the thread is a freshly created worker), signals
  // |count_waiter| and blocks on |waiter_|.
  void CountZeroTlsValuesAndWait(WaitableEvent* count_waiter) {
    if (!slot_.Get())
      subtle::NoBarrier_AtomicIncrement(&zero_tls_values_, 1);

    count_waiter->Signal();
    waiter_.Wait();
  }

 protected:
  TaskSchedulerWorkerPoolCheckTlsReuse() :
      waiter_(WaitableEvent::ResetPolicy::MANUAL,
              WaitableEvent::InitialState::NOT_SIGNALED) {}

  void SetUp() override {
    CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
                             kNumWorkersInWorkerPool);
  }

  // Number of task executions that observed an untagged TLS slot.
  subtle::Atomic32 zero_tls_values_ = 0;

  // Blocks tasks until the test signals it; manually reset between phases.
  WaitableEvent waiter_;

 private:
  ThreadLocalStorage::Slot slot_;

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolCheckTlsReuse);
};
487
488} // namespace
489
Jeffrey He92b46b42017-08-09 00:05:01 +0900490// Checks that at least one worker has been cleaned up by checking the TLS.
491TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckCleanupWorkers) {
492 // Saturate the workers and mark each worker's thread with a magic TLS value.
robliao2e951752016-07-23 03:12:18 +0900493 std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
494 for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
Jeremy Romancd0c4672017-08-17 08:27:24 +0900495 factories.push_back(std::make_unique<test::TestTaskFactory>(
fdorayb7013402017-05-09 13:18:32 +0900496 worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}),
fdoray9c56ea32016-11-02 23:35:26 +0900497 test::ExecutionMode::PARALLEL));
robliao2e951752016-07-23 03:12:18 +0900498 ASSERT_TRUE(factories.back()->PostTask(
499 PostNestedTask::NO,
500 Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::SetTlsValueAndWait,
501 Unretained(this))));
502 factories.back()->WaitForAllTasksToRun();
503 }
504
505 // Release tasks waiting on |waiter_|.
506 waiter_.Signal();
507 worker_pool_->WaitForAllWorkersIdleForTesting();
508
Jeffrey He92b46b42017-08-09 00:05:01 +0900509 // All workers should be done running by now, so reset for the next phase.
robliao2e951752016-07-23 03:12:18 +0900510 waiter_.Reset();
511
Jeffrey He92b46b42017-08-09 00:05:01 +0900512 // Give the worker pool a chance to cleanup its workers.
513 PlatformThread::Sleep(kReclaimTimeForCleanupTests +
514 kExtraTimeToWaitForCleanup);
robliao2e951752016-07-23 03:12:18 +0900515
Jeffrey He92b46b42017-08-09 00:05:01 +0900516 worker_pool_->DisallowWorkerCleanupForTesting();
robliao2e951752016-07-23 03:12:18 +0900517
Jeffrey He92b46b42017-08-09 00:05:01 +0900518 // Saturate and count the worker threads that do not have the magic TLS value.
519 // If the value is not there, that means we're at a new worker.
robliao2e951752016-07-23 03:12:18 +0900520 std::vector<std::unique_ptr<WaitableEvent>> count_waiters;
521 for (auto& factory : factories) {
522 count_waiters.push_back(WrapUnique(new WaitableEvent(
523 WaitableEvent::ResetPolicy::MANUAL,
524 WaitableEvent::InitialState::NOT_SIGNALED)));
525 ASSERT_TRUE(factory->PostTask(
526 PostNestedTask::NO,
527 Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::CountZeroTlsValuesAndWait,
528 Unretained(this),
529 count_waiters.back().get())));
530 factory->WaitForAllTasksToRun();
531 }
532
533 // Wait for all counters to complete.
534 for (auto& count_waiter : count_waiters)
535 count_waiter->Wait();
536
537 EXPECT_GT(subtle::NoBarrier_Load(&zero_tls_values_), 0);
538
539 // Release tasks waiting on |waiter_|.
540 waiter_.Signal();
541}
542
fdoray4b836782016-09-28 05:44:25 +0900543namespace {
544
// Fixture for histogram tests. Installs a temporary StatisticsRecorder so
// samples recorded here are isolated — presumably from the global recorder;
// see StatisticsRecorder::CreateTemporaryForTesting().
class TaskSchedulerWorkerPoolHistogramTest
    : public TaskSchedulerWorkerPoolImplTest {
 public:
  TaskSchedulerWorkerPoolHistogramTest() = default;

 protected:
  // Override SetUp() to allow every test case to initialize a worker pool with
  // its own arguments.
  void SetUp() override {}

 private:
  std::unique_ptr<StatisticsRecorder> statistics_recorder_ =
      StatisticsRecorder::CreateTemporaryForTesting();

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolHistogramTest);
};
561
562} // namespace
563
// Verifies the TaskScheduler.NumTasksBetweenWaits.* histogram records the
// number of tasks a worker ran between two waits (3 in this scenario).
TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                      WaitableEvent::InitialState::NOT_SIGNALED);
  CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
  auto task_runner = worker_pool_->CreateSequencedTaskRunnerWithTraits(
      {WithBaseSyncPrimitives()});

  // Post a task.
  task_runner->PostTask(FROM_HERE,
                        BindOnce(&WaitableEvent::Wait, Unretained(&event)));

  // Post 2 more tasks while the first task hasn't completed its execution. It
  // is guaranteed that these tasks will run immediately after the first task,
  // without allowing the worker to sleep.
  task_runner->PostTask(FROM_HERE, DoNothing());
  task_runner->PostTask(FROM_HERE, DoNothing());

  // Allow tasks to run and wait until the SchedulerWorker is idle.
  event.Signal();
  worker_pool_->WaitForAllWorkersIdleForTesting();

  // Wake up the SchedulerWorker that just became idle by posting a task and
  // wait until it becomes idle again. The SchedulerWorker should record the
  // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up.
  task_runner->PostTask(FROM_HERE, DoNothing());
  worker_pool_->WaitForAllWorkersIdleForTesting();

  // Verify that counts were recorded to the histogram as expected: exactly one
  // sample of 3 (the first three tasks before the wait) and no 0 or 10 samples.
  const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
  EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
}
597
598namespace {
599
// Signals |signal_event| to report that the task started running, then blocks
// on |wait_event| until the test releases it.
void SignalAndWaitEvent(WaitableEvent* signal_event,
                        WaitableEvent* wait_event) {
  signal_event->Signal();
  wait_event->Wait();
}
605
606} // namespace
607
// Verifies that NumTasksBetweenWaits histogram is logged as expected across
// idle and cleanup periods.
TEST_F(TaskSchedulerWorkerPoolHistogramTest,
       NumTasksBetweenWaitsWithIdlePeriodAndCleanup) {
  WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL,
                                     WaitableEvent::InitialState::NOT_SIGNALED);
  // Use a short reclaim time so workers clean up during the sleep below.
  CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
                           kNumWorkersInWorkerPool);
  auto task_runner =
      worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});

  // Post tasks to saturate the pool. Each task signals its own "started" event
  // and then blocks on the shared |tasks_can_exit_event|, pinning one worker.
  std::vector<std::unique_ptr<WaitableEvent>> task_started_events;
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_started_events.push_back(std::make_unique<WaitableEvent>(
        WaitableEvent::ResetPolicy::MANUAL,
        WaitableEvent::InitialState::NOT_SIGNALED));
    task_runner->PostTask(FROM_HERE,
                          BindOnce(&SignalAndWaitEvent,
                                   Unretained(task_started_events.back().get()),
                                   Unretained(&tasks_can_exit_event)));
  }
  // Each worker is now running exactly one task.
  for (const auto& task_started_event : task_started_events)
    task_started_event->Wait();

  // Allow tasks to complete their execution and wait to allow workers to
  // cleanup (at least one of them will not cleanup to keep the idle thread
  // count above zero).
  tasks_can_exit_event.Signal();
  worker_pool_->WaitForAllWorkersIdleForTesting();
  PlatformThread::Sleep(kReclaimTimeForCleanupTests +
                        kExtraTimeToWaitForCleanup);

  // Wake up SchedulerWorkers by posting tasks. They should record the
  // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up.
  tasks_can_exit_event.Reset();
  task_started_events.clear();
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_started_events.push_back(std::make_unique<WaitableEvent>(
        WaitableEvent::ResetPolicy::MANUAL,
        WaitableEvent::InitialState::NOT_SIGNALED));
    task_runner->PostTask(FROM_HERE,
                          BindOnce(&SignalAndWaitEvent,
                                   Unretained(task_started_events.back().get()),
                                   Unretained(&tasks_can_exit_event)));
  }
  for (const auto& task_started_event : task_started_events)
    task_started_event->Wait();

  const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();

  // Verify that counts were recorded to the histogram as expected.
  // - The "0" bucket does not have a report because we do not report this
  //   histogram when threads get no work twice in a row and cleanup (or go idle
  //   if last on idle stack).
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
  // - The "1" bucket has a count of |kNumWorkersInWorkerPool| because each
  //   SchedulerWorker ran a task before waiting on its WaitableEvent at the
  //   beginning of the test (counted the same whether resurrecting post-cleanup
  //   or waking from idle).
  EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
            histogram->SnapshotSamples()->GetCount(1));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));

  // Release the second wave of tasks and tear down cleanly.
  tasks_can_exit_event.Signal();
  worker_pool_->WaitForAllWorkersIdleForTesting();
  worker_pool_->DisallowWorkerCleanupForTesting();
}
676
// Verifies that the TaskScheduler.NumTasksBeforeDetach histogram records the
// number of tasks a worker ran before it cleaned up (here: one worker runs 3
// tasks, another runs 0).
TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
  CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
                           kNumWorkersInWorkerPool);

  // A sequenced runner so all its tasks run one after another (the test
  // additionally assumes they all run on the same worker, verified below).
  auto histogrammed_thread_task_runner =
      worker_pool_->CreateSequencedTaskRunnerWithTraits(
          {WithBaseSyncPrimitives()});

  // Post 3 tasks and hold the thread for idle thread stack ordering.
  // This test assumes |histogrammed_thread_task_runner| gets assigned the same
  // thread for each of its tasks.
  PlatformThreadRef thread_ref;
  // Task 1: capture the worker's thread ref.
  histogrammed_thread_task_runner->PostTask(
      FROM_HERE, BindOnce(
                     [](PlatformThreadRef* thread_ref) {
                       ASSERT_TRUE(thread_ref);
                       *thread_ref = PlatformThread::CurrentRef();
                     },
                     Unretained(&thread_ref)));
  // Task 2: verify we are still on the same worker thread.
  histogrammed_thread_task_runner->PostTask(
      FROM_HERE, BindOnce(
                     [](PlatformThreadRef* thread_ref) {
                       ASSERT_FALSE(thread_ref->is_null());
                       EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef());
                     },
                     Unretained(&thread_ref)));

  WaitableEvent cleanup_thread_running(
      WaitableEvent::ResetPolicy::MANUAL,
      WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent cleanup_thread_continue(
      WaitableEvent::ResetPolicy::MANUAL,
      WaitableEvent::InitialState::NOT_SIGNALED);
  // Task 3: hold the worker until the test is ready for it to go idle.
  histogrammed_thread_task_runner->PostTask(
      FROM_HERE,
      BindOnce(
          [](PlatformThreadRef* thread_ref,
             WaitableEvent* cleanup_thread_running,
             WaitableEvent* cleanup_thread_continue) {
            ASSERT_FALSE(thread_ref->is_null());
            EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef());
            cleanup_thread_running->Signal();
            cleanup_thread_continue->Wait();
          },
          Unretained(&thread_ref), Unretained(&cleanup_thread_running),
          Unretained(&cleanup_thread_continue)));

  cleanup_thread_running.Wait();

  // To allow the SchedulerWorker associated with
  // |histogrammed_thread_task_runner| to cleanup, make sure it isn't on top of
  // the idle stack by waking up another SchedulerWorker via
  // |task_runner_for_top_idle|. |histogrammed_thread_task_runner| should
  // release and go idle first and then |task_runner_for_top_idle| should
  // release and go idle. This allows the SchedulerWorker associated with
  // |histogrammed_thread_task_runner| to cleanup.
  WaitableEvent top_idle_thread_running(
      WaitableEvent::ResetPolicy::MANUAL,
      WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent top_idle_thread_continue(
      WaitableEvent::ResetPolicy::MANUAL,
      WaitableEvent::InitialState::NOT_SIGNALED);
  auto task_runner_for_top_idle =
      worker_pool_->CreateSequencedTaskRunnerWithTraits(
          {WithBaseSyncPrimitives()});
  task_runner_for_top_idle->PostTask(
      FROM_HERE, BindOnce(
                     [](PlatformThreadRef thread_ref,
                        WaitableEvent* top_idle_thread_running,
                        WaitableEvent* top_idle_thread_continue) {
                       ASSERT_FALSE(thread_ref.is_null());
                       // Must run on a different worker or the histogram count
                       // under test would be wrong.
                       EXPECT_NE(thread_ref, PlatformThread::CurrentRef())
                           << "Worker reused. Worker will not cleanup and the "
                              "histogram value will be wrong.";
                       top_idle_thread_running->Signal();
                       top_idle_thread_continue->Wait();
                     },
                     thread_ref, Unretained(&top_idle_thread_running),
                     Unretained(&top_idle_thread_continue)));
  top_idle_thread_running.Wait();
  cleanup_thread_continue.Signal();
  // Wait for the thread processing the |histogrammed_thread_task_runner| work
  // to go to the idle stack.
  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
  top_idle_thread_continue.Signal();
  // Allow the thread processing the |histogrammed_thread_task_runner| work to
  // cleanup.
  PlatformThread::Sleep(kReclaimTimeForCleanupTests +
                        kReclaimTimeForCleanupTests);
  worker_pool_->WaitForAllWorkersIdleForTesting();
  worker_pool_->DisallowWorkerCleanupForTesting();

  // Verify that counts were recorded to the histogram as expected.
  const auto* histogram = worker_pool_->num_tasks_before_detach_histogram();
  // Note: There'll be a thread that cleanups after running no tasks. This
  // thread was the one created to maintain an idle thread after posting the
  // task via |task_runner_for_top_idle|.
  EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(0));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(1));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(2));
  // The histogrammed worker ran exactly the 3 tasks posted above.
  EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(4));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(5));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(6));
  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
}
783
robliao8ff674c2016-11-18 03:33:32 +0900784TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
Gabriel Charette19075392018-01-19 20:00:53 +0900785 TaskTracker task_tracker("Test");
fdoray4a475d62017-04-20 22:13:11 +0900786 DelayedTaskManager delayed_task_manager;
Francois Dorayb9223752017-09-06 03:44:43 +0900787 scoped_refptr<TaskRunner> service_thread_task_runner =
788 MakeRefCounted<TestSimpleTaskRunner>();
789 delayed_task_manager.Start(service_thread_task_runner);
Jeremy Romancd0c4672017-08-17 08:27:24 +0900790 auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
Gabriel Charettee1640632018-01-19 01:56:31 +0900791 "OnePolicyWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker,
robliao8ff674c2016-11-18 03:33:32 +0900792 &delayed_task_manager);
Francois Dorayb9223752017-09-06 03:44:43 +0900793 worker_pool->Start(SchedulerWorkerPoolParams(8U, TimeDelta::Max()),
Robert Liao4eaab522017-11-02 05:06:03 +0900794 service_thread_task_runner,
795 SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
robliao8ff674c2016-11-18 03:33:32 +0900796 ASSERT_TRUE(worker_pool);
Jeffrey He92b46b42017-08-09 00:05:01 +0900797 EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
robliao8ff674c2016-11-18 03:33:32 +0900798 worker_pool->JoinForTesting();
799}
800
// Verify the SchedulerWorkerPoolImpl keeps at least one idle standby thread,
// capacity permitting.
TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
  constexpr size_t worker_capacity = 3;

  TaskTracker task_tracker("Test");
  DelayedTaskManager delayed_task_manager;
  scoped_refptr<TaskRunner> service_thread_task_runner =
      MakeRefCounted<TestSimpleTaskRunner>();
  delayed_task_manager.Start(service_thread_task_runner);
  auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
      "StandbyThreadWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker,
      &delayed_task_manager);
  // Short reclaim time so a worker can clean up within the sleep below.
  worker_pool->Start(
      SchedulerWorkerPoolParams(worker_capacity, kReclaimTimeForCleanupTests),
      service_thread_task_runner,
      SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
  ASSERT_TRUE(worker_pool);
  // The pool starts with a single standby worker.
  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());

  auto task_runner =
      worker_pool->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});

  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
                                WaitableEvent::InitialState::NOT_SIGNALED);

  // Each run of |closure| occupies one worker until |thread_continue| is
  // signaled at the end of the test.
  RepeatingClosure closure = BindRepeating(
      [](WaitableEvent* thread_running, WaitableEvent* thread_continue) {
        thread_running->Signal();
        thread_continue->Wait();
      },
      Unretained(&thread_running), Unretained(&thread_continue));

  // There should be one idle thread until we reach worker capacity
  for (size_t i = 0; i < worker_capacity; ++i) {
    // i workers are busy; the (i+1)-th is the standby worker.
    EXPECT_EQ(i + 1, worker_pool->NumberOfWorkersForTesting());
    task_runner->PostTask(FROM_HERE, closure);
    thread_running.Wait();
  }

  // There should not be an extra idle thread if it means going above capacity
  EXPECT_EQ(worker_capacity, worker_pool->NumberOfWorkersForTesting());

  thread_continue.Signal();
  // Give time for a worker to cleanup. Verify that the pool attempts to keep
  // one idle active worker.
  PlatformThread::Sleep(kReclaimTimeForCleanupTests +
                        kExtraTimeToWaitForCleanup);
  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());

  worker_pool->DisallowWorkerCleanupForTesting();
  worker_pool->JoinForTesting();
}
856
Francois Doray7c49b872017-09-12 02:27:50 +0900857namespace {
858
859enum class OptionalBlockingType {
860 NO_BLOCK,
861 MAY_BLOCK,
862 WILL_BLOCK,
863};
864
865struct NestedBlockingType {
866 NestedBlockingType(BlockingType first_in,
867 OptionalBlockingType second_in,
868 BlockingType behaves_as_in)
869 : first(first_in), second(second_in), behaves_as(behaves_as_in) {}
870
871 BlockingType first;
872 OptionalBlockingType second;
873 BlockingType behaves_as;
874};
875
876class NestedScopedBlockingCall {
Jeffrey Heb23ff4c2017-08-23 07:32:49 +0900877 public:
Francois Doray7c49b872017-09-12 02:27:50 +0900878 NestedScopedBlockingCall(const NestedBlockingType& nested_blocking_type)
879 : first_scoped_blocking_call_(nested_blocking_type.first),
880 second_scoped_blocking_call_(
881 nested_blocking_type.second == OptionalBlockingType::WILL_BLOCK
882 ? std::make_unique<ScopedBlockingCall>(BlockingType::WILL_BLOCK)
883 : (nested_blocking_type.second ==
884 OptionalBlockingType::MAY_BLOCK
885 ? std::make_unique<ScopedBlockingCall>(
886 BlockingType::MAY_BLOCK)
887 : nullptr)) {}
888
889 private:
890 ScopedBlockingCall first_scoped_blocking_call_;
891 std::unique_ptr<ScopedBlockingCall> second_scoped_blocking_call_;
892
893 DISALLOW_COPY_AND_ASSIGN(NestedScopedBlockingCall);
894};
895
896} // namespace
897
// Fixture for ScopedBlockingCall tests, parameterized on the nested blocking
// configuration (see NestedBlockingType). Provides helpers to saturate the
// pool with blocking tasks and to wait for capacity changes.
class TaskSchedulerWorkerPoolBlockingTest
    : public TaskSchedulerWorkerPoolImplTestBase,
      public testing::TestWithParam<NestedBlockingType> {
 public:
  TaskSchedulerWorkerPoolBlockingTest()
      : blocking_thread_running_(WaitableEvent::ResetPolicy::AUTOMATIC,
                                 WaitableEvent::InitialState::NOT_SIGNALED),
        blocking_thread_continue_(WaitableEvent::ResetPolicy::MANUAL,
                                  WaitableEvent::InitialState::NOT_SIGNALED) {}

  // Builds the gtest parameterized-test name suffix from the param, e.g.
  // "MAY_BLOCK" or "MAY_BLOCK_WILL_BLOCK".
  static std::string ParamInfoToString(
      ::testing::TestParamInfo<NestedBlockingType> param_info) {
    std::string str = param_info.param.first == BlockingType::MAY_BLOCK
                          ? "MAY_BLOCK"
                          : "WILL_BLOCK";
    if (param_info.param.second == OptionalBlockingType::MAY_BLOCK)
      str += "_MAY_BLOCK";
    else if (param_info.param.second == OptionalBlockingType::WILL_BLOCK)
      str += "_WILL_BLOCK";
    return str;
  }

  // Starts the pool via the base fixture, then caches a task runner that
  // allows base sync primitives.
  void SetUp() override {
    TaskSchedulerWorkerPoolImplTestBase::CommonSetUp();
    task_runner_ =
        worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
  }

  void TearDown() override {
    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
  }

 protected:
  // Saturates the worker pool with a task that first blocks, waits to be
  // unblocked, then exits.
  void SaturateWithBlockingTasks(
      const NestedBlockingType& nested_blocking_type) {
    // Signals |blocking_thread_running_| once all posted tasks have started.
    RepeatingClosure blocking_thread_running_closure =
        BarrierClosure(kNumWorkersInWorkerPool,
                       BindOnce(&WaitableEvent::Signal,
                                Unretained(&blocking_thread_running_)));

    for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
      task_runner_->PostTask(
          FROM_HERE,
          BindOnce(
              [](Closure* blocking_thread_running_closure,
                 WaitableEvent* blocking_thread_continue_,
                 const NestedBlockingType& nested_blocking_type) {
                // Enter the blocking scope(s) under test before reporting
                // that the task is running.
                NestedScopedBlockingCall nested_scoped_blocking_call(
                    nested_blocking_type);
                blocking_thread_running_closure->Run();

                {
                  // Use ScopedClearBlockingObserverForTesting to avoid
                  // affecting the worker capacity with this WaitableEvent.
                  internal::ScopedClearBlockingObserverForTesting
                      scoped_clear_blocking_observer;
                  blocking_thread_continue_->Wait();
                }

              },
              Unretained(&blocking_thread_running_closure),
              Unretained(&blocking_thread_continue_), nested_blocking_type));
    }
    blocking_thread_running_.Wait();
  }

  // Returns how long we can expect a change to |worker_capacity_| to occur
  // after a task has become blocked.
  TimeDelta GetWorkerCapacityChangeSleepTime() {
    return std::max(SchedulerWorkerPoolImpl::kBlockedWorkersPollPeriod,
                    worker_pool_->MayBlockThreshold()) +
           TestTimeouts::tiny_timeout();
  }

  // Waits indefinitely, until |worker_pool_|'s worker capacity increases to
  // |expected_worker_capacity|.
  void ExpectWorkerCapacityIncreasesTo(size_t expected_worker_capacity) {
    size_t capacity = worker_pool_->GetWorkerCapacityForTesting();
    while (capacity != expected_worker_capacity) {
      PlatformThread::Sleep(GetWorkerCapacityChangeSleepTime());
      size_t new_capacity = worker_pool_->GetWorkerCapacityForTesting();
      // Capacity must only move toward the expected value, never shrink.
      ASSERT_GE(new_capacity, capacity);
      capacity = new_capacity;
    }
  }

  // Unblocks tasks posted by SaturateWithBlockingTasks().
  void UnblockTasks() { blocking_thread_continue_.Signal(); }

  scoped_refptr<TaskRunner> task_runner_;

 private:
  // Signaled once every task posted by SaturateWithBlockingTasks() has
  // started (AUTOMATIC: re-armed after each Wait()).
  WaitableEvent blocking_thread_running_;
  // Signaled by UnblockTasks() to let all blocked tasks finish (MANUAL: one
  // Signal() releases every waiter).
  WaitableEvent blocking_thread_continue_;

  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolBlockingTest);
};
997
// Verify that BlockingScopeEntered() causes worker capacity to increase and
// creates a worker if needed. Also verify that BlockingScopeExited() decreases
// worker capacity after an increase.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockedUnblocked) {
  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);

  SaturateWithBlockingTasks(GetParam());
  // MAY_BLOCK raises capacity only after a threshold delay, so poll for it;
  // WILL_BLOCK raises it immediately.
  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
    ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
  // A range of possible number of workers is accepted because of
  // crbug.com/757897.
  EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
            kNumWorkersInWorkerPool + 1);
  EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
            2 * kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            2 * kNumWorkersInWorkerPool);

  UnblockTasks();
  task_tracker_.FlushForTesting();
  // Once the blocking scopes exit, capacity returns to its initial value.
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);
}
1022
// Verify that tasks posted in a saturated pool before a ScopedBlockingCall will
// execute after ScopedBlockingCall is instantiated.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, PostBeforeBlocking) {
  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent thread_can_block(WaitableEvent::ResetPolicy::MANUAL,
                                 WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
                                WaitableEvent::InitialState::NOT_SIGNALED);

  // Saturate the pool with tasks that only enter their blocking scope after
  // |thread_can_block| is signaled.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_runner_->PostTask(
        FROM_HERE,
        BindOnce(
            [](const NestedBlockingType& nested_blocking_type,
               WaitableEvent* thread_running, WaitableEvent* thread_can_block,
               WaitableEvent* thread_continue) {
              thread_running->Signal();
              {
                // Use ScopedClearBlockingObserverForTesting to avoid affecting
                // the worker capacity with this WaitableEvent.
                internal::ScopedClearBlockingObserverForTesting
                    scoped_clear_blocking_observer;
                thread_can_block->Wait();
              }

              NestedScopedBlockingCall nested_scoped_blocking_call(
                  nested_blocking_type);

              {
                // Use ScopedClearBlockingObserverForTesting to avoid affecting
                // the worker capacity with this WaitableEvent.
                internal::ScopedClearBlockingObserverForTesting
                    scoped_clear_blocking_observer;
                thread_continue->Wait();
              }
            },
            GetParam(), Unretained(&thread_running),
            Unretained(&thread_can_block), Unretained(&thread_continue)));
    thread_running.Wait();
  }

  // All workers should be occupied and the pool should be saturated. Workers
  // have not entered ScopedBlockingCall yet.
  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);

  // Post a second wave of tasks; they cannot start until capacity grows.
  WaitableEvent extra_thread_running(WaitableEvent::ResetPolicy::MANUAL,
                                     WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent extra_threads_continue(
      WaitableEvent::ResetPolicy::MANUAL,
      WaitableEvent::InitialState::NOT_SIGNALED);
  RepeatingClosure extra_threads_running_barrier = BarrierClosure(
      kNumWorkersInWorkerPool,
      BindOnce(&WaitableEvent::Signal, Unretained(&extra_thread_running)));
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_runner_->PostTask(FROM_HERE,
                           BindOnce(
                               [](Closure* extra_threads_running_barrier,
                                  WaitableEvent* extra_threads_continue) {
                                 extra_threads_running_barrier->Run();
                                 {
                                   // Use ScopedClearBlockingObserverForTesting
                                   // to avoid affecting the worker capacity
                                   // with this WaitableEvent.
                                   internal::
                                       ScopedClearBlockingObserverForTesting
                                           scoped_clear_blocking_observer;
                                   extra_threads_continue->Wait();
                                 }
                               },
                               Unretained(&extra_threads_running_barrier),
                               Unretained(&extra_threads_continue)));
  }

  // Allow tasks to enter ScopedBlockingCall. Workers should be created for the
  // tasks we just posted.
  thread_can_block.Signal();
  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
    ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);

  // Should not block forever.
  extra_thread_running.Wait();
  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
            2 * kNumWorkersInWorkerPool);
  extra_threads_continue.Signal();

  thread_continue.Signal();
  task_tracker_.FlushForTesting();
}
// Verify that workers become idle when the pool is over-capacity and that
// those workers do no work.
TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);

  SaturateWithBlockingTasks(GetParam());
  // MAY_BLOCK raises capacity only after a threshold delay, so poll for it.
  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
    ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            2 * kNumWorkersInWorkerPool);
  // A range of possible number of workers is accepted because of
  // crbug.com/757897.
  EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
            kNumWorkersInWorkerPool + 1);
  EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
            2 * kNumWorkersInWorkerPool);

  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
                                WaitableEvent::InitialState::NOT_SIGNALED);

  RepeatingClosure thread_running_barrier = BarrierClosure(
      kNumWorkersInWorkerPool,
      BindOnce(&WaitableEvent::Signal, Unretained(&thread_running)));
  // Posting these tasks should cause new workers to be created.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    auto callback = BindOnce(
        [](Closure* thread_running_barrier, WaitableEvent* thread_continue) {
          thread_running_barrier->Run();
          {
            // Use ScopedClearBlockingObserverForTesting to avoid affecting the
            // worker capacity with this WaitableEvent.
            internal::ScopedClearBlockingObserverForTesting
                scoped_clear_blocking_observer;
            thread_continue->Wait();
          }
        },
        Unretained(&thread_running_barrier), Unretained(&thread_continue));
    task_runner_->PostTask(FROM_HERE, std::move(callback));
  }
  thread_running.Wait();

  // Every worker (original + extra) is now busy.
  ASSERT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(), 0U);
  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
            2 * kNumWorkersInWorkerPool);

  AtomicFlag is_exiting;
  // These tasks should not get executed until after other tasks become
  // unblocked.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_runner_->PostTask(FROM_HERE, BindOnce(
                                          [](AtomicFlag* is_exiting) {
                                            EXPECT_TRUE(is_exiting->IsSet());
                                          },
                                          Unretained(&is_exiting)));
  }

  // The original |kNumWorkersInWorkerPool| will finish their tasks after being
  // unblocked. There will be work in the work queue, but the pool should now
  // be over-capacity and workers will become idle.
  UnblockTasks();
  worker_pool_->WaitForWorkersIdleForTesting(kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(),
            kNumWorkersInWorkerPool);

  // Posting more tasks should not cause workers idle from the pool being over
  // capacity to begin doing work.
  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
    task_runner_->PostTask(FROM_HERE, BindOnce(
                                          [](AtomicFlag* is_exiting) {
                                            EXPECT_TRUE(is_exiting->IsSet());
                                          },
                                          Unretained(&is_exiting)));
  }

  // Give time for those idle workers to possibly do work (which should not
  // happen).
  PlatformThread::Sleep(TestTimeouts::tiny_timeout());

  is_exiting.Set();
  // Unblocks the new workers.
  thread_continue.Signal();
  task_tracker_.FlushForTesting();
}
1200
// Instantiates TaskSchedulerWorkerPoolBlockingTest for the four supported
// nested-blocking configurations; the third field of each NestedBlockingType
// is the BlockingType the combination is expected to behave as. Test names
// are generated by ParamInfoToString (e.g. "MAY_BLOCK_WILL_BLOCK").
INSTANTIATE_TEST_CASE_P(
    ,
    TaskSchedulerWorkerPoolBlockingTest,
    ::testing::Values(NestedBlockingType(BlockingType::MAY_BLOCK,
                                         OptionalBlockingType::NO_BLOCK,
                                         BlockingType::MAY_BLOCK),
                      NestedBlockingType(BlockingType::WILL_BLOCK,
                                         OptionalBlockingType::NO_BLOCK,
                                         BlockingType::WILL_BLOCK),
                      NestedBlockingType(BlockingType::MAY_BLOCK,
                                         OptionalBlockingType::WILL_BLOCK,
                                         BlockingType::WILL_BLOCK),
                      NestedBlockingType(BlockingType::WILL_BLOCK,
                                         OptionalBlockingType::MAY_BLOCK,
                                         BlockingType::WILL_BLOCK)),
    TaskSchedulerWorkerPoolBlockingTest::ParamInfoToString);
Francois Dorayb9223752017-09-06 03:44:43 +09001217
// Verify that if a thread enters the scope of a MAY_BLOCK ScopedBlockingCall,
// but exits the scope before the MayBlockThreshold() is reached, that the
// worker capacity does not increase.
TEST_F(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockUnblockPremature) {
  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);

  // Capture the sleep time before maximizing the threshold, since
  // GetWorkerCapacityChangeSleepTime() depends on MayBlockThreshold().
  TimeDelta worker_capacity_change_sleep = GetWorkerCapacityChangeSleepTime();
  // Push the threshold out so it is never reached during this test.
  worker_pool_->MaximizeMayBlockThresholdForTesting();

  SaturateWithBlockingTasks(NestedBlockingType(BlockingType::MAY_BLOCK,
                                               OptionalBlockingType::NO_BLOCK,
                                               BlockingType::MAY_BLOCK));
  PlatformThread::Sleep(worker_capacity_change_sleep);
  // Neither the worker count nor the capacity should have changed.
  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool);
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);

  UnblockTasks();
  task_tracker_.FlushForTesting();
  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
            kNumWorkersInWorkerPool);
}
1241
Francois Doray7c49b872017-09-12 02:27:50 +09001242// Verify that if worker capacity is incremented because of a MAY_BLOCK
1243// ScopedBlockingCall, it isn't incremented again when there is a nested
1244// WILL_BLOCK ScopedBlockingCall.
1245TEST_F(TaskSchedulerWorkerPoolBlockingTest,
1246 MayBlockIncreaseCapacityNestedWillBlock) {
1247 ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
1248 kNumWorkersInWorkerPool);
1249 auto task_runner =
1250 worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
1251 WaitableEvent can_return(WaitableEvent::ResetPolicy::MANUAL,
1252 WaitableEvent::InitialState::NOT_SIGNALED);
1253
1254 // Saturate the pool so that a MAY_BLOCK ScopedBlockingCall would increment
1255 // the worker capacity.
1256 for (size_t i = 0; i < kNumWorkersInWorkerPool - 1; ++i) {
Francois Doraye058b722017-09-12 04:53:29 +09001257 task_runner->PostTask(FROM_HERE,
1258 BindOnce(
1259 [](WaitableEvent* can_return) {
1260 // Use ScopedClearBlockingObserverForTesting to
1261 // avoid affecting the worker capacity with this
1262 // WaitableEvent.
1263 internal::ScopedClearBlockingObserverForTesting
1264 scoped_clear_blocking_observer;
1265 can_return->Wait();
1266 },
1267 Unretained(&can_return)));
Francois Doray7c49b872017-09-12 02:27:50 +09001268 }
1269
1270 WaitableEvent can_instantiate_will_block(
1271 WaitableEvent::ResetPolicy::MANUAL,
1272 WaitableEvent::InitialState::NOT_SIGNALED);
1273 WaitableEvent did_instantiate_will_block(
1274 WaitableEvent::ResetPolicy::MANUAL,
1275 WaitableEvent::InitialState::NOT_SIGNALED);
1276
1277 // Post a task that instantiates a MAY_BLOCK ScopedBlockingCall.
1278 task_runner->PostTask(
1279 FROM_HERE,
1280 BindOnce(
1281 [](WaitableEvent* can_instantiate_will_block,
1282 WaitableEvent* did_instantiate_will_block,
1283 WaitableEvent* can_return) {
1284 ScopedBlockingCall may_block(BlockingType::MAY_BLOCK);
Francois Doraye058b722017-09-12 04:53:29 +09001285 {
1286 // Use ScopedClearBlockingObserverForTesting to avoid affecting
1287 // the worker capacity with this WaitableEvent.
1288 internal::ScopedClearBlockingObserverForTesting
1289 scoped_clear_blocking_observer;
1290 can_instantiate_will_block->Wait();
1291 }
Francois Doray7c49b872017-09-12 02:27:50 +09001292 ScopedBlockingCall will_block(BlockingType::WILL_BLOCK);
1293 did_instantiate_will_block->Signal();
Francois Doraye058b722017-09-12 04:53:29 +09001294 {
1295 // Use ScopedClearBlockingObserverForTesting to avoid affecting
1296 // the worker capacity with this WaitableEvent.
1297 internal::ScopedClearBlockingObserverForTesting
1298 scoped_clear_blocking_observer;
1299 can_return->Wait();
1300 }
Francois Doray7c49b872017-09-12 02:27:50 +09001301 },
1302 Unretained(&can_instantiate_will_block),
1303 Unretained(&did_instantiate_will_block), Unretained(&can_return)));
1304
1305 // After a short delay, worker capacity should be incremented.
Wez9a709382017-12-07 02:16:22 +09001306 ExpectWorkerCapacityIncreasesTo(kNumWorkersInWorkerPool + 1);
Francois Doray7c49b872017-09-12 02:27:50 +09001307
1308 // Wait until the task instantiates a WILL_BLOCK ScopedBlockingCall.
1309 can_instantiate_will_block.Signal();
1310 did_instantiate_will_block.Wait();
1311
1312 // Worker capacity shouldn't be incremented again.
1313 EXPECT_EQ(kNumWorkersInWorkerPool + 1,
1314 worker_pool_->GetWorkerCapacityForTesting());
1315
1316 // Tear down.
1317 can_return.Signal();
Robert Liaoe25acff2018-01-25 07:39:17 +09001318 task_tracker_.FlushForTesting();
Francois Doray7c49b872017-09-12 02:27:50 +09001319 EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
1320 kNumWorkersInWorkerPool);
1321}
1322
Jeffrey Heb23ff4c2017-08-23 07:32:49 +09001323// Verify that workers that become idle due to the pool being over capacity will
1324// eventually cleanup.
1325TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
1326 constexpr size_t kWorkerCapacity = 3;
1327
Gabriel Charette19075392018-01-19 20:00:53 +09001328 TaskTracker task_tracker("Test");
Jeffrey Heb23ff4c2017-08-23 07:32:49 +09001329 DelayedTaskManager delayed_task_manager;
Francois Dorayb9223752017-09-06 03:44:43 +09001330 scoped_refptr<TaskRunner> service_thread_task_runner =
1331 MakeRefCounted<TestSimpleTaskRunner>();
1332 delayed_task_manager.Start(service_thread_task_runner);
Gabriel Charettee1640632018-01-19 01:56:31 +09001333 SchedulerWorkerPoolImpl worker_pool("OverWorkerCapacityTestWorkerPool", "A",
Jeffrey Heb23ff4c2017-08-23 07:32:49 +09001334 ThreadPriority::NORMAL, &task_tracker,
1335 &delayed_task_manager);
1336 worker_pool.Start(
Francois Dorayb9223752017-09-06 03:44:43 +09001337 SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests),
Robert Liao4eaab522017-11-02 05:06:03 +09001338 service_thread_task_runner,
1339 SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
Jeffrey Heb23ff4c2017-08-23 07:32:49 +09001340
1341 scoped_refptr<TaskRunner> task_runner =
1342 worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
1343
1344 WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
1345 WaitableEvent::InitialState::NOT_SIGNALED);
1346 WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
1347 WaitableEvent::InitialState::NOT_SIGNALED);
1348 RepeatingClosure thread_running_barrier = BarrierClosure(
1349 kWorkerCapacity,
1350 BindOnce(&WaitableEvent::Signal, Unretained(&thread_running)));
1351
1352 WaitableEvent blocked_call_continue(
1353 WaitableEvent::ResetPolicy::MANUAL,
1354 WaitableEvent::InitialState::NOT_SIGNALED);
1355
1356 RepeatingClosure closure = BindRepeating(
1357 [](Closure* thread_running_barrier, WaitableEvent* thread_continue,
1358 WaitableEvent* blocked_call_continue) {
1359 thread_running_barrier->Run();
1360 {
1361 ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
1362 blocked_call_continue->Wait();
1363 }
1364 thread_continue->Wait();
1365
1366 },
1367 Unretained(&thread_running_barrier), Unretained(&thread_continue),
1368 Unretained(&blocked_call_continue));
1369
1370 for (size_t i = 0; i < kWorkerCapacity; ++i)
1371 task_runner->PostTask(FROM_HERE, closure);
1372
1373 thread_running.Wait();
1374
1375 WaitableEvent extra_threads_running(
1376 WaitableEvent::ResetPolicy::AUTOMATIC,
1377 WaitableEvent::InitialState::NOT_SIGNALED);
1378 WaitableEvent extra_threads_continue(
1379 WaitableEvent::ResetPolicy::MANUAL,
1380 WaitableEvent::InitialState::NOT_SIGNALED);
1381
1382 RepeatingClosure extra_threads_running_barrier = BarrierClosure(
1383 kWorkerCapacity,
1384 BindOnce(&WaitableEvent::Signal, Unretained(&extra_threads_running)));
1385 // These tasks should run on the new threads from increasing worker capacity.
1386 for (size_t i = 0; i < kWorkerCapacity; ++i) {
1387 task_runner->PostTask(FROM_HERE,
1388 BindOnce(
1389 [](Closure* extra_threads_running_barrier,
1390 WaitableEvent* extra_threads_continue) {
1391 extra_threads_running_barrier->Run();
1392 extra_threads_continue->Wait();
1393 },
1394 Unretained(&extra_threads_running_barrier),
1395 Unretained(&extra_threads_continue)));
1396 }
1397 extra_threads_running.Wait();
1398
1399 ASSERT_EQ(kWorkerCapacity * 2, worker_pool.NumberOfWorkersForTesting());
1400 EXPECT_EQ(kWorkerCapacity * 2, worker_pool.GetWorkerCapacityForTesting());
1401 blocked_call_continue.Signal();
1402 extra_threads_continue.Signal();
1403
1404 TimeTicks before_cleanup_start = TimeTicks::Now();
1405 while (TimeTicks::Now() - before_cleanup_start <
1406 kReclaimTimeForCleanupTests + kExtraTimeToWaitForCleanup) {
1407 if (worker_pool.NumberOfWorkersForTesting() <= kWorkerCapacity + 1)
1408 break;
1409
1410 // Periodically post tasks to ensure that posting tasks does not prevent
1411 // workers that are idle due to the pool being over capacity from cleaning
1412 // up.
Peter Kasting24efe5e2018-02-24 09:03:01 +09001413 task_runner->PostTask(FROM_HERE, DoNothing());
Jeffrey Heb23ff4c2017-08-23 07:32:49 +09001414 PlatformThread::Sleep(kReclaimTimeForCleanupTests / 2);
1415 }
1416 // Note: one worker above capacity will not get cleaned up since it's on the
1417 // top of the idle stack.
1418 EXPECT_EQ(kWorkerCapacity + 1, worker_pool.NumberOfWorkersForTesting());
1419
1420 thread_continue.Signal();
1421
1422 worker_pool.DisallowWorkerCleanupForTesting();
1423 worker_pool.JoinForTesting();
1424}
1425
Jeffrey He44264742017-08-27 11:38:05 +09001426// Verify that the maximum number of workers is 256 and that hitting the max
1427// leaves the pool in a valid state with regards to worker capacity.
Francois Doray7c49b872017-09-12 02:27:50 +09001428TEST_F(TaskSchedulerWorkerPoolBlockingTest, MaximumWorkersTest) {
Jeffrey He44264742017-08-27 11:38:05 +09001429 constexpr size_t kMaxNumberOfWorkers = 256;
1430 constexpr size_t kNumExtraTasks = 10;
1431
1432 WaitableEvent early_blocking_thread_running(
1433 WaitableEvent::ResetPolicy::MANUAL,
1434 WaitableEvent::InitialState::NOT_SIGNALED);
1435 RepeatingClosure early_threads_barrier_closure =
1436 BarrierClosure(kMaxNumberOfWorkers,
1437 BindOnce(&WaitableEvent::Signal,
1438 Unretained(&early_blocking_thread_running)));
1439
1440 WaitableEvent early_threads_finished(
1441 WaitableEvent::ResetPolicy::MANUAL,
1442 WaitableEvent::InitialState::NOT_SIGNALED);
1443 RepeatingClosure early_threads_finished_barrier = BarrierClosure(
1444 kMaxNumberOfWorkers,
1445 BindOnce(&WaitableEvent::Signal, Unretained(&early_threads_finished)));
1446
1447 WaitableEvent early_release_thread_continue(
1448 WaitableEvent::ResetPolicy::MANUAL,
1449 WaitableEvent::InitialState::NOT_SIGNALED);
1450
1451 // Post ScopedBlockingCall tasks to hit the worker cap.
1452 for (size_t i = 0; i < kMaxNumberOfWorkers; ++i) {
1453 task_runner_->PostTask(FROM_HERE,
1454 BindOnce(
1455 [](Closure* early_threads_barrier_closure,
1456 WaitableEvent* early_release_thread_continue,
1457 Closure* early_threads_finished) {
1458 {
1459 ScopedBlockingCall scoped_blocking_call(
1460 BlockingType::WILL_BLOCK);
1461 early_threads_barrier_closure->Run();
1462 early_release_thread_continue->Wait();
1463 }
1464 early_threads_finished->Run();
1465 },
1466 Unretained(&early_threads_barrier_closure),
1467 Unretained(&early_release_thread_continue),
1468 Unretained(&early_threads_finished_barrier)));
1469 }
1470
1471 early_blocking_thread_running.Wait();
1472 EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
1473 kNumWorkersInWorkerPool + kMaxNumberOfWorkers);
1474
1475 WaitableEvent late_release_thread_contine(
1476 WaitableEvent::ResetPolicy::MANUAL,
1477 WaitableEvent::InitialState::NOT_SIGNALED);
1478
1479 WaitableEvent late_blocking_thread_running(
1480 WaitableEvent::ResetPolicy::MANUAL,
1481 WaitableEvent::InitialState::NOT_SIGNALED);
1482 RepeatingClosure late_threads_barrier_closure = BarrierClosure(
1483 kNumExtraTasks, BindOnce(&WaitableEvent::Signal,
1484 Unretained(&late_blocking_thread_running)));
1485
1486 // Posts additional tasks. Note: we should already have |kMaxNumberOfWorkers|
1487 // tasks running. These tasks should not be able to get executed yet as
1488 // the pool is already at its max worker cap.
1489 for (size_t i = 0; i < kNumExtraTasks; ++i) {
1490 task_runner_->PostTask(
1491 FROM_HERE,
1492 BindOnce(
1493 [](Closure* late_threads_barrier_closure,
1494 WaitableEvent* late_release_thread_contine) {
1495 ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
1496 late_threads_barrier_closure->Run();
1497 late_release_thread_contine->Wait();
1498 },
1499 Unretained(&late_threads_barrier_closure),
1500 Unretained(&late_release_thread_contine)));
1501 }
1502
1503 // Give time to see if we exceed the max number of workers.
1504 PlatformThread::Sleep(TestTimeouts::tiny_timeout());
1505 EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), kMaxNumberOfWorkers);
1506
1507 early_release_thread_continue.Signal();
1508 early_threads_finished.Wait();
1509 late_blocking_thread_running.Wait();
1510
1511 WaitableEvent final_tasks_running(WaitableEvent::ResetPolicy::MANUAL,
1512 WaitableEvent::InitialState::NOT_SIGNALED);
1513 WaitableEvent final_tasks_continue(WaitableEvent::ResetPolicy::MANUAL,
1514 WaitableEvent::InitialState::NOT_SIGNALED);
1515 RepeatingClosure final_tasks_running_barrier = BarrierClosure(
1516 kNumWorkersInWorkerPool,
1517 BindOnce(&WaitableEvent::Signal, Unretained(&final_tasks_running)));
1518
1519 // Verify that we are still able to saturate the pool.
1520 for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
1521 task_runner_->PostTask(
1522 FROM_HERE,
1523 BindOnce(
1524 [](Closure* closure, WaitableEvent* final_tasks_continue) {
1525 closure->Run();
1526 final_tasks_continue->Wait();
1527 },
1528 Unretained(&final_tasks_running_barrier),
1529 Unretained(&final_tasks_continue)));
1530 }
1531 final_tasks_running.Wait();
1532 EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
1533 kNumWorkersInWorkerPool + kNumExtraTasks);
1534 late_release_thread_contine.Signal();
1535 final_tasks_continue.Signal();
Robert Liaoe25acff2018-01-25 07:39:17 +09001536 task_tracker_.FlushForTesting();
Jeffrey He44264742017-08-27 11:38:05 +09001537}
1538
fdoraya2d271b2016-04-15 23:09:08 +09001539} // namespace internal
1540} // namespace base