/*
 *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "rtc_base/thread.h"

#if defined(WEBRTC_WIN)
#include <comdef.h>
#elif defined(WEBRTC_POSIX)
#include <time.h>
#else
#error "Either WEBRTC_WIN or WEBRTC_POSIX needs to be defined."
#endif

#if defined(WEBRTC_WIN)
// Disable warning that we don't care about:
// warning C4722: destructor never returns, potential memory leak
#pragma warning(disable : 4722)
#endif

#include <stdio.h>

#include <utility>

#include "absl/algorithm/container.h"
#include "rtc_base/atomic_ops.h"
#include "rtc_base/checks.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/logging.h"
#include "rtc_base/null_socket_server.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"

#if defined(WEBRTC_MAC)
#include "rtc_base/system/cocoa_threading.h"

/*
 * These are forward-declarations for methods that are part of the
 * ObjC runtime. They are declared in the private header objc-internal.h.
 * These calls are what clang inserts when using @autoreleasepool in ObjC,
 * but here they are used directly in order to keep this file C++.
 * https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
 */
extern "C" {
void* objc_autoreleasePoolPush(void);
void objc_autoreleasePoolPop(void* pool);
}

namespace {
class ScopedAutoReleasePool {
 public:
  ScopedAutoReleasePool() : pool_(objc_autoreleasePoolPush()) {}
  ~ScopedAutoReleasePool() { objc_autoreleasePoolPop(pool_); }

 private:
  void* const pool_;
};
}  // namespace
#endif
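
// Illustrative note (not part of the original source): on WEBRTC_MAC builds,
// ScopedAutoReleasePool gives this C++ file the effect of an Objective-C
// @autoreleasepool block. It is instantiated once around Run() (see PreRun)
// and once per ProcessMessages() loop iteration, which is roughly:
//
//   {
//     ScopedAutoReleasePool pool;              // objc_autoreleasePoolPush()
//     DoWorkThatMayAutoreleaseObjCObjects();   // hypothetical work
//   }  // objc_autoreleasePoolPop() drains the pool here.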

namespace rtc {
namespace {

const int kSlowDispatchLoggingThreshold = 50;  // 50 ms

class MessageHandlerWithTask final : public MessageHandler {
 public:
  MessageHandlerWithTask() = default;

  void OnMessage(Message* msg) override {
    static_cast<rtc_thread_internal::MessageLikeTask*>(msg->pdata)->Run();
    delete msg->pdata;
  }

 private:
  ~MessageHandlerWithTask() override {}

  RTC_DISALLOW_COPY_AND_ASSIGN(MessageHandlerWithTask);
};

class RTC_SCOPED_LOCKABLE MarkProcessingCritScope {
 public:
  MarkProcessingCritScope(const CriticalSection* cs, size_t* processing)
      RTC_EXCLUSIVE_LOCK_FUNCTION(cs)
      : cs_(cs), processing_(processing) {
    cs_->Enter();
    *processing_ += 1;
  }

  ~MarkProcessingCritScope() RTC_UNLOCK_FUNCTION() {
    *processing_ -= 1;
    cs_->Leave();
  }

 private:
  const CriticalSection* const cs_;
  size_t* processing_;

  RTC_DISALLOW_COPY_AND_ASSIGN(MarkProcessingCritScope);
};

}  // namespace

ThreadManager* ThreadManager::Instance() {
  static ThreadManager* const thread_manager = new ThreadManager();
  return thread_manager;
}

ThreadManager::~ThreadManager() {
  // Instance() allocates the singleton as a leaky static; it is never deleted.
  RTC_NOTREACHED() << "ThreadManager should never be destructed.";
}

// static
void ThreadManager::Add(Thread* message_queue) {
  return Instance()->AddInternal(message_queue);
}
void ThreadManager::AddInternal(Thread* message_queue) {
  CritScope cs(&crit_);
  // Prevent changes while the list of message queues is processed.
  RTC_DCHECK_EQ(processing_, 0);
  message_queues_.push_back(message_queue);
}

// static
void ThreadManager::Remove(Thread* message_queue) {
  return Instance()->RemoveInternal(message_queue);
}
void ThreadManager::RemoveInternal(Thread* message_queue) {
  {
    CritScope cs(&crit_);
    // Prevent changes while the list of message queues is processed.
    RTC_DCHECK_EQ(processing_, 0);
    std::vector<Thread*>::iterator iter;
    iter = absl::c_find(message_queues_, message_queue);
    if (iter != message_queues_.end()) {
      message_queues_.erase(iter);
    }
#if RTC_DCHECK_IS_ON
    RemoveFromSendGraph(message_queue);
#endif
  }
}

#if RTC_DCHECK_IS_ON
void ThreadManager::RemoveFromSendGraph(Thread* thread) {
  for (auto it = send_graph_.begin(); it != send_graph_.end();) {
    if (it->first == thread) {
      it = send_graph_.erase(it);
    } else {
      it->second.erase(thread);
      ++it;
    }
  }
}

void ThreadManager::RegisterSendAndCheckForCycles(Thread* source,
                                                  Thread* target) {
  CritScope cs(&crit_);
  std::deque<Thread*> all_targets({target});
  // We check the pre-existing who-sends-to-who graph for any path from target
  // to source. This loop is guaranteed to terminate because per the send graph
  // invariant, there are no cycles in the graph.
  for (auto it = all_targets.begin(); it != all_targets.end(); ++it) {
    const auto& targets = send_graph_[*it];
    all_targets.insert(all_targets.end(), targets.begin(), targets.end());
  }
  RTC_CHECK_EQ(absl::c_count(all_targets, source), 0)
      << " send loop between " << source->name() << " and " << target->name();

  // We may now insert source -> target without creating a cycle, since there
  // was no path from target to source per the prior CHECK.
  send_graph_[source].insert(target);
}
#endif
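
// Illustrative note (not part of the original source): the send-graph check
// above turns a potential Send()/Invoke() deadlock into an immediate
// RTC_CHECK failure in RTC_DCHECK_IS_ON builds. For two threads |a| and |b|:
//
//   // Code running on thread |a|:
//   b->Invoke<void>(RTC_FROM_HERE, [] { /* work */ });  // registers a -> b
//
// A later blocking call from code on |b| back to |a| would try to register
// b -> a, find the existing path a -> b, and hit the RTC_CHECK_EQ above
// instead of deadlocking silently.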

// static
void ThreadManager::Clear(MessageHandler* handler) {
  return Instance()->ClearInternal(handler);
}
void ThreadManager::ClearInternal(MessageHandler* handler) {
  // Deleted objects may cause re-entrant calls to ClearInternal. This is
  // allowed as the list of message queues does not change while queues are
  // cleared.
  MarkProcessingCritScope cs(&crit_, &processing_);
  for (Thread* queue : message_queues_) {
    queue->Clear(handler);
  }
}

// static
void ThreadManager::ProcessAllMessageQueuesForTesting() {
  return Instance()->ProcessAllMessageQueuesInternal();
}

void ThreadManager::ProcessAllMessageQueuesInternal() {
  // This works by posting a delayed message at the current time and waiting
  // for it to be dispatched on all queues, which will ensure that all messages
  // that came before it were also dispatched.
  volatile int queues_not_done = 0;

  // This class is used so that whether the posted message is processed, or the
  // message queue is simply cleared, queues_not_done gets decremented.
  class ScopedIncrement : public MessageData {
   public:
    ScopedIncrement(volatile int* value) : value_(value) {
      AtomicOps::Increment(value_);
    }
    ~ScopedIncrement() override { AtomicOps::Decrement(value_); }

   private:
    volatile int* value_;
  };

  {
    MarkProcessingCritScope cs(&crit_, &processing_);
    for (Thread* queue : message_queues_) {
      if (!queue->IsProcessingMessagesForTesting()) {
        // If the queue is not processing messages, it can be ignored.
        // If we tried to post a message to it, it would be dropped or ignored.
        continue;
      }
      queue->PostDelayed(RTC_FROM_HERE, 0, nullptr, MQID_DISPOSE,
                         new ScopedIncrement(&queues_not_done));
    }
  }

  rtc::Thread* current = rtc::Thread::Current();
  // Note: One of the message queues may have been on this thread, which is
  // why we can't synchronously wait for queues_not_done to go to 0; we need
  // to process messages as well.
  while (AtomicOps::AcquireLoad(&queues_not_done) > 0) {
    if (current) {
      current->ProcessMessages(0);
    }
  }
}
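
// Usage sketch (illustrative, not part of the original file): tests typically
// call the static wrapper above to drain every registered queue before
// asserting on side effects. Assuming a gtest-style test body:
//
//   worker->PostTask(webrtc::ToQueuedTask([&] { ++counter; }));
//   rtc::ThreadManager::ProcessAllMessageQueuesForTesting();
//   EXPECT_EQ(1, counter);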

// static
Thread* Thread::Current() {
  ThreadManager* manager = ThreadManager::Instance();
  Thread* thread = manager->CurrentThread();

#ifndef NO_MAIN_THREAD_WRAPPING
  // Only autowrap the thread which instantiated the ThreadManager.
  if (!thread && manager->IsMainThread()) {
    thread = new Thread(SocketServer::CreateDefault());
    thread->WrapCurrentWithThreadManager(manager, true);
  }
#endif

  return thread;
}

#if defined(WEBRTC_POSIX)
ThreadManager::ThreadManager() : main_thread_ref_(CurrentThreadRef()) {
#if defined(WEBRTC_MAC)
  InitCocoaMultiThreading();
#endif
  pthread_key_create(&key_, nullptr);
}

Thread* ThreadManager::CurrentThread() {
  return static_cast<Thread*>(pthread_getspecific(key_));
}

void ThreadManager::SetCurrentThreadInternal(Thread* thread) {
  pthread_setspecific(key_, thread);
}
#endif

#if defined(WEBRTC_WIN)
ThreadManager::ThreadManager()
    : key_(TlsAlloc()), main_thread_ref_(CurrentThreadRef()) {}

Thread* ThreadManager::CurrentThread() {
  return static_cast<Thread*>(TlsGetValue(key_));
}

void ThreadManager::SetCurrentThreadInternal(Thread* thread) {
  TlsSetValue(key_, thread);
}
#endif

void ThreadManager::SetCurrentThread(Thread* thread) {
#if RTC_DLOG_IS_ON
  if (CurrentThread() && thread) {
    RTC_DLOG(LS_ERROR) << "SetCurrentThread: Overwriting an existing value?";
  }
#endif  // RTC_DLOG_IS_ON
  SetCurrentThreadInternal(thread);
}

void rtc::ThreadManager::ChangeCurrentThreadForTest(rtc::Thread* thread) {
  SetCurrentThreadInternal(thread);
}

Thread* ThreadManager::WrapCurrentThread() {
  Thread* result = CurrentThread();
  if (nullptr == result) {
    result = new Thread(SocketServer::CreateDefault());
    result->WrapCurrentWithThreadManager(this, true);
  }
  return result;
}

void ThreadManager::UnwrapCurrentThread() {
  Thread* t = CurrentThread();
  if (t && !(t->IsOwned())) {
    t->UnwrapCurrent();
    delete t;
  }
}

bool ThreadManager::IsMainThread() {
  return IsThreadRefEqual(CurrentThreadRef(), main_thread_ref_);
}

Thread::ScopedDisallowBlockingCalls::ScopedDisallowBlockingCalls()
    : thread_(Thread::Current()),
      previous_state_(thread_->SetAllowBlockingCalls(false)) {}

Thread::ScopedDisallowBlockingCalls::~ScopedDisallowBlockingCalls() {
  RTC_DCHECK(thread_->IsCurrent());
  thread_->SetAllowBlockingCalls(previous_state_);
}

Thread::Thread(SocketServer* ss) : Thread(ss, /*do_init=*/true) {}

Thread::Thread(std::unique_ptr<SocketServer> ss)
    : Thread(std::move(ss), /*do_init=*/true) {}

Thread::Thread(SocketServer* ss, bool do_init)
    : fPeekKeep_(false),
      delayed_next_num_(0),
      fInitialized_(false),
      fDestroyed_(false),
      stop_(0),
      ss_(ss) {
  RTC_DCHECK(ss);
  ss_->SetMessageQueue(this);
  SetName("Thread", this);  // default name
  if (do_init) {
    DoInit();
  }
}

Thread::Thread(std::unique_ptr<SocketServer> ss, bool do_init)
    : Thread(ss.get(), do_init) {
  own_ss_ = std::move(ss);
}

Thread::~Thread() {
  Stop();
  DoDestroy();
}

void Thread::DoInit() {
  if (fInitialized_) {
    return;
  }

  fInitialized_ = true;
  ThreadManager::Add(this);
}

void Thread::DoDestroy() {
  if (fDestroyed_) {
    return;
  }

  fDestroyed_ = true;
  // The signal is done from here to ensure
  // that it always gets called when the queue
  // is going away.
  SignalQueueDestroyed();
  ThreadManager::Remove(this);
  ClearInternal(nullptr, MQID_ANY, nullptr);

  if (ss_) {
    ss_->SetMessageQueue(nullptr);
  }
}

SocketServer* Thread::socketserver() {
  return ss_;
}

void Thread::WakeUpSocketServer() {
  ss_->WakeUp();
}

void Thread::Quit() {
  AtomicOps::ReleaseStore(&stop_, 1);
  WakeUpSocketServer();
}

bool Thread::IsQuitting() {
  return AtomicOps::AcquireLoad(&stop_) != 0;
}

void Thread::Restart() {
  AtomicOps::ReleaseStore(&stop_, 0);
}

bool Thread::Peek(Message* pmsg, int cmsWait) {
  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    return true;
  }
  if (!Get(pmsg, cmsWait))
    return false;
  msgPeek_ = *pmsg;
  fPeekKeep_ = true;
  return true;
}

bool Thread::Get(Message* pmsg, int cmsWait, bool process_io) {
  // Return and clear peek if present
  // Always return the peek if it exists so there is Peek/Get symmetry

  if (fPeekKeep_) {
    *pmsg = msgPeek_;
    fPeekKeep_ = false;
    return true;
  }

  // Get w/wait + timer scan / dispatch + socket / event multiplexer dispatch

  int64_t cmsTotal = cmsWait;
  int64_t cmsElapsed = 0;
  int64_t msStart = TimeMillis();
  int64_t msCurrent = msStart;
  while (true) {
    // Check for posted events
    int64_t cmsDelayNext = kForever;
    bool first_pass = true;
    while (true) {
      // All queue operations need to be locked, but nothing else in this loop
      // (specifically handling disposed message) can happen inside the crit.
      // Otherwise, disposed MessageHandlers will cause deadlocks.
      {
        CritScope cs(&crit_);
        // On the first pass, check for delayed messages that have been
        // triggered and calculate the next trigger time.
        if (first_pass) {
          first_pass = false;
          while (!delayed_messages_.empty()) {
            if (msCurrent < delayed_messages_.top().run_time_ms_) {
              cmsDelayNext =
                  TimeDiff(delayed_messages_.top().run_time_ms_, msCurrent);
              break;
            }
            messages_.push_back(delayed_messages_.top().msg_);
            delayed_messages_.pop();
          }
        }
        // Pull a message off the message queue, if available.
        if (messages_.empty()) {
          break;
        } else {
          *pmsg = messages_.front();
          messages_.pop_front();
        }
      }  // crit_ is released here.

      // If this was a dispose message, delete it and skip it.
      if (MQID_DISPOSE == pmsg->message_id) {
        RTC_DCHECK(nullptr == pmsg->phandler);
        delete pmsg->pdata;
        *pmsg = Message();
        continue;
      }
      return true;
    }

    if (IsQuitting())
      break;

    // Which is shorter, the delay wait or the asked wait?

    int64_t cmsNext;
    if (cmsWait == kForever) {
      cmsNext = cmsDelayNext;
    } else {
      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
      if ((cmsDelayNext != kForever) && (cmsDelayNext < cmsNext))
        cmsNext = cmsDelayNext;
    }

    {
      // Wait and multiplex in the meantime
      if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
        return false;
    }

    // If the specified timeout expired, return

    msCurrent = TimeMillis();
    cmsElapsed = TimeDiff(msCurrent, msStart);
    if (cmsWait != kForever) {
      if (cmsElapsed >= cmsWait)
        return false;
    }
  }
  return false;
}

void Thread::Post(const Location& posted_from,
                  MessageHandler* phandler,
                  uint32_t id,
                  MessageData* pdata,
                  bool time_sensitive) {
  RTC_DCHECK(!time_sensitive);
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add the message to the end of the queue
  // Signal for the multiplexer to return

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    messages_.push_back(msg);
  }
  WakeUpSocketServer();
}

void Thread::PostDelayed(const Location& posted_from,
                         int delay_ms,
                         MessageHandler* phandler,
                         uint32_t id,
                         MessageData* pdata) {
  return DoDelayPost(posted_from, delay_ms, TimeAfter(delay_ms), phandler, id,
                     pdata);
}

void Thread::PostAt(const Location& posted_from,
                    int64_t run_at_ms,
                    MessageHandler* phandler,
                    uint32_t id,
                    MessageData* pdata) {
  return DoDelayPost(posted_from, TimeUntil(run_at_ms), run_at_ms, phandler, id,
                     pdata);
}

void Thread::DoDelayPost(const Location& posted_from,
                         int64_t delay_ms,
                         int64_t run_at_ms,
                         MessageHandler* phandler,
                         uint32_t id,
                         MessageData* pdata) {
  if (IsQuitting()) {
    delete pdata;
    return;
  }

  // Keep thread safe
  // Add to the priority queue. Gets sorted soonest first.
  // Signal for the multiplexer to return.

  {
    CritScope cs(&crit_);
    Message msg;
    msg.posted_from = posted_from;
    msg.phandler = phandler;
    msg.message_id = id;
    msg.pdata = pdata;
    DelayedMessage delayed(delay_ms, run_at_ms, delayed_next_num_, msg);
    delayed_messages_.push(delayed);
    // If this message queue processes 1 message every millisecond for 50 days,
    // we will wrap this number. Even then, only messages with identical times
    // will be misordered, and then only briefly. This is probably ok.
    ++delayed_next_num_;
    RTC_DCHECK_NE(0, delayed_next_num_);
  }
  WakeUpSocketServer();
}
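
// Illustrative sketch (not part of the original file): from calling code, the
// MessageHandler-based delayed post above looks roughly like this. The
// |PingHandler| type, the 200 ms delay and the message id are hypothetical,
// and the handler must outlive dispatch (or be removed with Clear()).
//
//   class PingHandler : public rtc::MessageHandler {
//    public:
//     void OnMessage(rtc::Message* msg) override {
//       // Runs on |thread| roughly 200 ms after the post.
//     }
//   };
//
//   thread->PostDelayed(RTC_FROM_HERE, /*delay_ms=*/200, &ping_handler,
//                       /*id=*/0, /*pdata=*/nullptr);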

int Thread::GetDelay() {
  CritScope cs(&crit_);

  if (!messages_.empty())
    return 0;

  if (!delayed_messages_.empty()) {
    int delay = TimeUntil(delayed_messages_.top().run_time_ms_);
    if (delay < 0)
      delay = 0;
    return delay;
  }

  return kForever;
}

void Thread::ClearInternal(MessageHandler* phandler,
                           uint32_t id,
                           MessageList* removed) {
  // Remove messages with phandler

  if (fPeekKeep_ && msgPeek_.Match(phandler, id)) {
    if (removed) {
      removed->push_back(msgPeek_);
    } else {
      delete msgPeek_.pdata;
    }
    fPeekKeep_ = false;
  }

  // Remove from ordered message queue

  for (auto it = messages_.begin(); it != messages_.end();) {
    if (it->Match(phandler, id)) {
      if (removed) {
        removed->push_back(*it);
      } else {
        delete it->pdata;
      }
      it = messages_.erase(it);
    } else {
      ++it;
    }
  }

  // Remove from priority queue. Not directly iterable, so use this approach

  auto new_end = delayed_messages_.container().begin();
  for (auto it = new_end; it != delayed_messages_.container().end(); ++it) {
    if (it->msg_.Match(phandler, id)) {
      if (removed) {
        removed->push_back(it->msg_);
      } else {
        delete it->msg_.pdata;
      }
    } else {
      *new_end++ = *it;
    }
  }
  delayed_messages_.container().erase(new_end,
                                      delayed_messages_.container().end());
  delayed_messages_.reheap();
}

void Thread::Dispatch(Message* pmsg) {
  TRACE_EVENT2("webrtc", "Thread::Dispatch", "src_file",
               pmsg->posted_from.file_name(), "src_func",
               pmsg->posted_from.function_name());
  int64_t start_time = TimeMillis();
  pmsg->phandler->OnMessage(pmsg);
  int64_t end_time = TimeMillis();
  int64_t diff = TimeDiff(end_time, start_time);
  if (diff >= kSlowDispatchLoggingThreshold) {
    RTC_LOG(LS_INFO) << "Message took " << diff
                     << "ms to dispatch. Posted from: "
                     << pmsg->posted_from.ToString();
  }
}

bool Thread::IsCurrent() const {
  return ThreadManager::Instance()->CurrentThread() == this;
}

std::unique_ptr<Thread> Thread::CreateWithSocketServer() {
  return std::unique_ptr<Thread>(new Thread(SocketServer::CreateDefault()));
}

std::unique_ptr<Thread> Thread::Create() {
  return std::unique_ptr<Thread>(
      new Thread(std::unique_ptr<SocketServer>(new NullSocketServer())));
}
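
// Usage sketch (illustrative, not part of the original file): the typical
// owned-thread lifecycle built from the factory functions above.
//
//   std::unique_ptr<rtc::Thread> worker = rtc::Thread::Create();
//   worker->SetName("worker", nullptr);  // must happen before Start()
//   worker->Start();                     // spawns the OS thread, runs Run()
//   // ... post or invoke work on |worker| (see sketches further below) ...
//   worker->Stop();                      // Quit() + Join(); also called by
//                                        // ~Thread().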

bool Thread::SleepMs(int milliseconds) {
  AssertBlockingIsAllowedOnCurrentThread();

#if defined(WEBRTC_WIN)
  ::Sleep(milliseconds);
  return true;
#else
  // POSIX has both a usleep() and a nanosleep(), but the former is deprecated,
  // so we use nanosleep() even though it has greater precision than necessary.
  struct timespec ts;
  ts.tv_sec = milliseconds / 1000;
  ts.tv_nsec = (milliseconds % 1000) * 1000000;
  int ret = nanosleep(&ts, nullptr);
  if (ret != 0) {
    RTC_LOG_ERR(LS_WARNING) << "nanosleep() returning early";
    return false;
  }
  return true;
#endif
}

bool Thread::SetName(const std::string& name, const void* obj) {
  RTC_DCHECK(!IsRunning());

  name_ = name;
  if (obj) {
    // The %p specifier typically produces at most 16 hex digits, possibly with
    // a 0x prefix. But the format is implementation-defined, so add some
    // margin.
    char buf[30];
    snprintf(buf, sizeof(buf), " 0x%p", obj);
    name_ += buf;
  }
  return true;
}

bool Thread::Start() {
  RTC_DCHECK(!IsRunning());

  if (IsRunning())
    return false;

  Restart();  // reset IsQuitting() if the thread is being restarted

  // Make sure that ThreadManager is created on the main thread before
  // we start a new thread.
  ThreadManager::Instance();

  owned_ = true;

#if defined(WEBRTC_WIN)
  thread_ = CreateThread(nullptr, 0, PreRun, this, 0, &thread_id_);
  if (!thread_) {
    return false;
  }
#elif defined(WEBRTC_POSIX)
  pthread_attr_t attr;
  pthread_attr_init(&attr);

  int error_code = pthread_create(&thread_, &attr, PreRun, this);
  if (0 != error_code) {
    RTC_LOG(LS_ERROR) << "Unable to create pthread, error " << error_code;
    thread_ = 0;
    return false;
  }
  RTC_DCHECK(thread_);
#endif
  return true;
}

bool Thread::WrapCurrent() {
  return WrapCurrentWithThreadManager(ThreadManager::Instance(), true);
}

void Thread::UnwrapCurrent() {
  // Clears the platform-specific thread-specific storage.
  ThreadManager::Instance()->SetCurrentThread(nullptr);
#if defined(WEBRTC_WIN)
  if (thread_ != nullptr) {
    if (!CloseHandle(thread_)) {
      RTC_LOG_GLE(LS_ERROR)
          << "When unwrapping thread, failed to close handle.";
    }
    thread_ = nullptr;
    thread_id_ = 0;
  }
#elif defined(WEBRTC_POSIX)
  thread_ = 0;
#endif
}

void Thread::SafeWrapCurrent() {
  WrapCurrentWithThreadManager(ThreadManager::Instance(), false);
}

void Thread::Join() {
  if (!IsRunning())
    return;

  RTC_DCHECK(!IsCurrent());
  if (Current() && !Current()->blocking_calls_allowed_) {
    RTC_LOG(LS_WARNING) << "Waiting for the thread to join, "
                           "but blocking calls have been disallowed";
  }

#if defined(WEBRTC_WIN)
  RTC_DCHECK(thread_ != nullptr);
  WaitForSingleObject(thread_, INFINITE);
  CloseHandle(thread_);
  thread_ = nullptr;
  thread_id_ = 0;
#elif defined(WEBRTC_POSIX)
  pthread_join(thread_, nullptr);
  thread_ = 0;
#endif
}

bool Thread::SetAllowBlockingCalls(bool allow) {
  RTC_DCHECK(IsCurrent());
  bool previous = blocking_calls_allowed_;
  blocking_calls_allowed_ = allow;
  return previous;
}

// static
void Thread::AssertBlockingIsAllowedOnCurrentThread() {
#if !defined(NDEBUG)
  Thread* current = Thread::Current();
  RTC_DCHECK(!current || current->blocking_calls_allowed_);
#endif
}

// static
#if defined(WEBRTC_WIN)
DWORD WINAPI Thread::PreRun(LPVOID pv) {
#else
void* Thread::PreRun(void* pv) {
#endif
  Thread* thread = static_cast<Thread*>(pv);
  ThreadManager::Instance()->SetCurrentThread(thread);
  rtc::SetCurrentThreadName(thread->name_.c_str());
  CurrentTaskQueueSetter set_current_task_queue(thread);
#if defined(WEBRTC_MAC)
  ScopedAutoReleasePool pool;
#endif
  thread->Run();

  ThreadManager::Instance()->SetCurrentThread(nullptr);
#ifdef WEBRTC_WIN
  return 0;
#else
  return nullptr;
#endif
}

void Thread::Run() {
  ProcessMessages(kForever);
}

bool Thread::IsOwned() {
  RTC_DCHECK(IsRunning());
  return owned_;
}

void Thread::Stop() {
  Thread::Quit();
  Join();
}

void Thread::Send(const Location& posted_from,
                  MessageHandler* phandler,
                  uint32_t id,
                  MessageData* pdata) {
  RTC_DCHECK(!IsQuitting());
  if (IsQuitting())
    return;

  // Sent messages are sent to the MessageHandler directly, in the context
  // of "thread", like Win32 SendMessage. If in the right context,
  // call the handler directly.
  Message msg;
  msg.posted_from = posted_from;
  msg.phandler = phandler;
  msg.message_id = id;
  msg.pdata = pdata;
  if (IsCurrent()) {
    msg.phandler->OnMessage(&msg);
    return;
  }

  AssertBlockingIsAllowedOnCurrentThread();

  AutoThread thread;
  Thread* current_thread = Thread::Current();
  RTC_DCHECK(current_thread != nullptr);  // AutoThread ensures this
#if RTC_DCHECK_IS_ON
  ThreadManager::Instance()->RegisterSendAndCheckForCycles(current_thread,
                                                           this);
#endif
  bool ready = false;
  PostTask(
      webrtc::ToQueuedTask([msg]() mutable { msg.phandler->OnMessage(&msg); },
                           [this, &ready, current_thread] {
                             CritScope cs(&crit_);
                             ready = true;
                             current_thread->socketserver()->WakeUp();
                           }));

  bool waited = false;
  crit_.Enter();
  while (!ready) {
    crit_.Leave();
    current_thread->socketserver()->Wait(kForever, false);
    waited = true;
    crit_.Enter();
  }
  crit_.Leave();

  // Our Wait loop above may have consumed some WakeUp events for this
  // Thread, that weren't relevant to this Send. Losing these WakeUps can
  // cause problems for some SocketServers.
  //
  // Concrete example:
  // Win32SocketServer on thread A calls Send on thread B. While processing the
  // message, thread B Posts a message to A. We consume the wakeup for that
  // Post while waiting for the Send to complete, which means that when we exit
  // this loop, we need to issue another WakeUp, or else the Posted message
  // won't be processed in a timely manner.

  if (waited) {
    current_thread->socketserver()->WakeUp();
  }
}
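
// Illustrative sketch (not part of the original file): Send() is what backs
// the blocking Invoke() helper declared in thread.h, e.g.
//
//   int result = network_thread->Invoke<int>(
//       RTC_FROM_HERE, [] { return ComputeOnNetworkThread(); });
//
// where ComputeOnNetworkThread() is a hypothetical callee. The calling thread
// blocks in its own socket server's Wait() until the functor has run on the
// target thread, which is why the WakeUp() bookkeeping above matters.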

void Thread::InvokeInternal(const Location& posted_from,
                            rtc::FunctionView<void()> functor) {
  TRACE_EVENT2("webrtc", "Thread::Invoke", "src_file", posted_from.file_name(),
               "src_func", posted_from.function_name());

  class FunctorMessageHandler : public MessageHandler {
   public:
    explicit FunctorMessageHandler(rtc::FunctionView<void()> functor)
        : functor_(functor) {}
    void OnMessage(Message* msg) override { functor_(); }

   private:
    rtc::FunctionView<void()> functor_;
  } handler(functor);

  Send(posted_from, &handler);
}

void Thread::QueuedTaskHandler::OnMessage(Message* msg) {
  RTC_DCHECK(msg);
  auto* data = static_cast<ScopedMessageData<webrtc::QueuedTask>*>(msg->pdata);
  std::unique_ptr<webrtc::QueuedTask> task = std::move(data->data());
  // Thread expects handler to own Message::pdata when OnMessage is called.
  // Since MessageData is no longer needed, delete it.
  delete data;

  // QueuedTask interface uses Run return value to communicate who owns the
  // task. false means QueuedTask took the ownership.
  if (!task->Run())
    task.release();
}

void Thread::PostTask(std::unique_ptr<webrtc::QueuedTask> task) {
  // Though Post takes MessageData by raw pointer (last parameter), it still
  // takes it with ownership.
  Post(RTC_FROM_HERE, &queued_task_handler_,
       /*id=*/0, new ScopedMessageData<webrtc::QueuedTask>(std::move(task)));
}

void Thread::PostDelayedTask(std::unique_ptr<webrtc::QueuedTask> task,
                             uint32_t milliseconds) {
  // Though PostDelayed takes MessageData by raw pointer (last parameter),
  // it still takes it with ownership.
  PostDelayed(RTC_FROM_HERE, milliseconds, &queued_task_handler_,
              /*id=*/0,
              new ScopedMessageData<webrtc::QueuedTask>(std::move(task)));
}
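
// Usage sketch (illustrative, not part of the original file): both methods
// above are normally reached through webrtc::ToQueuedTask, e.g.
//
//   thread->PostTask(webrtc::ToQueuedTask([] { DoWork(); }));
//   thread->PostDelayedTask(webrtc::ToQueuedTask([] { DoLaterWork(); }),
//                           /*milliseconds=*/100);
//
// Ownership of the queued task transfers to |thread|; if the thread is
// quitting, the task is deleted without running (see Post/DoDelayPost above).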

void Thread::Delete() {
  Stop();
  delete this;
}

bool Thread::IsProcessingMessagesForTesting() {
  return (owned_ || IsCurrent()) && !IsQuitting();
}

void Thread::Clear(MessageHandler* phandler,
                   uint32_t id,
                   MessageList* removed) {
  CritScope cs(&crit_);
  ClearInternal(phandler, id, removed);
}

bool Thread::ProcessMessages(int cmsLoop) {
  // Using ProcessMessages with a custom clock for testing and a time greater
  // than 0 doesn't work, since it's not guaranteed to advance the custom
  // clock's time, and may get stuck in an infinite loop.
  RTC_DCHECK(GetClockForTesting() == nullptr || cmsLoop == 0 ||
             cmsLoop == kForever);
  int64_t msEnd = (kForever == cmsLoop) ? 0 : TimeAfter(cmsLoop);
  int cmsNext = cmsLoop;

  while (true) {
#if defined(WEBRTC_MAC)
    ScopedAutoReleasePool pool;
#endif
    Message msg;
    if (!Get(&msg, cmsNext))
      return !IsQuitting();
    Dispatch(&msg);

    if (cmsLoop != kForever) {
      cmsNext = static_cast<int>(TimeUntil(msEnd));
      if (cmsNext < 0)
        return true;
    }
  }
}

bool Thread::WrapCurrentWithThreadManager(ThreadManager* thread_manager,
                                          bool need_synchronize_access) {
  RTC_DCHECK(!IsRunning());

#if defined(WEBRTC_WIN)
  if (need_synchronize_access) {
    // We explicitly ask for no rights other than synchronization.
    // This gives us the best chance of succeeding.
    thread_ = OpenThread(SYNCHRONIZE, FALSE, GetCurrentThreadId());
    if (!thread_) {
      RTC_LOG_GLE(LS_ERROR) << "Unable to get handle to thread.";
      return false;
    }
    thread_id_ = GetCurrentThreadId();
  }
#elif defined(WEBRTC_POSIX)
  thread_ = pthread_self();
#endif
  owned_ = false;
  thread_manager->SetCurrentThread(this);
  return true;
}

bool Thread::IsRunning() {
#if defined(WEBRTC_WIN)
  return thread_ != nullptr;
#elif defined(WEBRTC_POSIX)
  return thread_ != 0;
#endif
}

// static
MessageHandler* Thread::GetPostTaskMessageHandler() {
  // Allocate at first call, never deallocate.
  static MessageHandler* handler = new MessageHandlerWithTask;
  return handler;
}

AutoThread::AutoThread()
    : Thread(SocketServer::CreateDefault(), /*do_init=*/false) {
  if (!ThreadManager::Instance()->CurrentThread()) {
    // DoInit registers with ThreadManager. Do that only if we intend to
    // be rtc::Thread::Current(), otherwise ProcessAllMessageQueuesInternal will
    // post a message to a queue that no running thread is serving.
    DoInit();
    ThreadManager::Instance()->SetCurrentThread(this);
  }
}

AutoThread::~AutoThread() {
  Stop();
  DoDestroy();
  if (ThreadManager::Instance()->CurrentThread() == this) {
    ThreadManager::Instance()->SetCurrentThread(nullptr);
  }
}

AutoSocketServerThread::AutoSocketServerThread(SocketServer* ss)
    : Thread(ss, /*do_init=*/false) {
  DoInit();
  old_thread_ = ThreadManager::Instance()->CurrentThread();
  // Temporarily set the current thread to nullptr so that we can keep checks
  // around that catch unintentional pointer overwrites.
  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
  rtc::ThreadManager::Instance()->SetCurrentThread(this);
  if (old_thread_) {
    ThreadManager::Remove(old_thread_);
  }
}

AutoSocketServerThread::~AutoSocketServerThread() {
  RTC_DCHECK(ThreadManager::Instance()->CurrentThread() == this);
  // Some tests post destroy messages to this thread. To avoid memory
  // leaks, we have to process those messages. In particular
  // P2PTransportChannelPingTest, relying on the message posted in
  // cricket::Connection::Destroy.
  ProcessMessages(0);
  // Stop and destroy the thread before clearing it as the current thread.
  // Sometimes there are messages left in the Thread that will be destroyed
  // by DoDestroy, and sometimes the destructors of the message and/or its
  // contents rely on this thread still being set as the current thread.
  Stop();
  DoDestroy();
  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
  rtc::ThreadManager::Instance()->SetCurrentThread(old_thread_);
  if (old_thread_) {
    ThreadManager::Add(old_thread_);
  }
}

}  // namespace rtc