blob: 1e5e82e644b10bcc041b58f1d1bee260f43de0ea [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "api.h"
31#include "bootstrapper.h"
32#include "debug.h"
33#include "execution.h"
34#include "v8threads.h"
35#include "regexp-stack.h"
36
37namespace v8 {
38
// TLS slots holding, per OS thread, the archived VM state (a ThreadState*)
// and the small-integer V8 thread id assigned by ThreadManager::AssignId().
static internal::Thread::LocalStorageKey thread_state_key =
    internal::Thread::CreateThreadLocalKey();
static internal::Thread::LocalStorageKey thread_id_key =
    internal::Thread::CreateThreadLocalKey();


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;
48
49
50// Constructor for the Locker object. Once the Locker is constructed the
51// current thread will be guaranteed to have the big V8 lock.
52Locker::Locker() : has_lock_(false), top_level_(true) {
53 // Record that the Locker has been used at least once.
54 active_ = true;
55 // Get the big lock if necessary.
56 if (!internal::ThreadManager::IsLockedByCurrentThread()) {
57 internal::ThreadManager::Lock();
58 has_lock_ = true;
59 // Make sure that V8 is initialized. Archiving of threads interferes
60 // with deserialization by adding additional root pointers, so we must
61 // initialize here, before anyone can call ~Locker() or Unlocker().
62 if (!internal::V8::IsRunning()) {
63 V8::Initialize();
64 }
65 // This may be a locker within an unlocker in which case we have to
66 // get the saved state for this thread and restore it.
67 if (internal::ThreadManager::RestoreThread()) {
68 top_level_ = false;
69 } else {
70 internal::ExecutionAccess access;
71 internal::StackGuard::ClearThread(access);
72 internal::StackGuard::InitThread(access);
73 }
74 }
75 ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
76
77 // Make sure this thread is assigned a thread id.
78 internal::ThreadManager::AssignId();
79}
80
81
82bool Locker::IsLocked() {
83 return internal::ThreadManager::IsLockedByCurrentThread();
84}
85
86
87Locker::~Locker() {
88 ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
89 if (has_lock_) {
90 if (top_level_) {
91 internal::ThreadManager::FreeThreadResources();
92 } else {
93 internal::ThreadManager::ArchiveThread();
94 }
95 internal::ThreadManager::Unlock();
96 }
97}
98
99
100Unlocker::Unlocker() {
101 ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
102 internal::ThreadManager::ArchiveThread();
103 internal::ThreadManager::Unlock();
104}
105
106
107Unlocker::~Unlocker() {
108 ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
109 internal::ThreadManager::Lock();
110 internal::ThreadManager::RestoreThread();
111}
112
113
114void Locker::StartPreemption(int every_n_ms) {
115 v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
116}
117
118
119void Locker::StopPreemption() {
120 v8::internal::ContextSwitcher::StopPreemption();
121}
122
123
124namespace internal {
125
126
127bool ThreadManager::RestoreThread() {
128 // First check whether the current thread has been 'lazily archived', ie
129 // not archived at all. If that is the case we put the state storage we
130 // had prepared back in the free list, since we didn't need it after all.
131 if (lazily_archived_thread_.IsSelf()) {
132 lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
133 ASSERT(Thread::GetThreadLocal(thread_state_key) ==
134 lazily_archived_thread_state_);
135 lazily_archived_thread_state_->set_id(kInvalidId);
136 lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
137 lazily_archived_thread_state_ = NULL;
138 Thread::SetThreadLocal(thread_state_key, NULL);
139 return true;
140 }
141
142 // Make sure that the preemption thread cannot modify the thread state while
143 // it is being archived or restored.
144 ExecutionAccess access;
145
146 // If there is another thread that was lazily archived then we have to really
147 // archive it now.
148 if (lazily_archived_thread_.IsValid()) {
149 EagerlyArchiveThread();
150 }
151 ThreadState* state =
152 reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
153 if (state == NULL) {
154 // This is a new thread.
155 StackGuard::InitThread(access);
156 return false;
157 }
158 char* from = state->data();
159 from = HandleScopeImplementer::RestoreThread(from);
160 from = Top::RestoreThread(from);
161 from = Relocatable::RestoreState(from);
162#ifdef ENABLE_DEBUGGER_SUPPORT
163 from = Debug::RestoreDebug(from);
164#endif
165 from = StackGuard::RestoreStackGuard(from);
166 from = RegExpStack::RestoreStack(from);
167 from = Bootstrapper::RestoreState(from);
168 Thread::SetThreadLocal(thread_state_key, NULL);
169 if (state->terminate_on_restore()) {
170 StackGuard::TerminateExecution();
171 state->set_terminate_on_restore(false);
172 }
173 state->set_id(kInvalidId);
174 state->Unlink();
175 state->LinkInto(ThreadState::FREE_LIST);
176 return true;
177}
178
179
180void ThreadManager::Lock() {
181 mutex_->Lock();
182 mutex_owner_.Initialize(ThreadHandle::SELF);
183 ASSERT(IsLockedByCurrentThread());
184}
185
186
187void ThreadManager::Unlock() {
188 mutex_owner_.Initialize(ThreadHandle::INVALID);
189 mutex_->Unlock();
190}
191
192
193static int ArchiveSpacePerThread() {
194 return HandleScopeImplementer::ArchiveSpacePerThread() +
195 Top::ArchiveSpacePerThread() +
196#ifdef ENABLE_DEBUGGER_SUPPORT
197 Debug::ArchiveSpacePerThread() +
198#endif
199 StackGuard::ArchiveSpacePerThread() +
200 RegExpStack::ArchiveSpacePerThread() +
201 Bootstrapper::ArchiveSpacePerThread() +
202 Relocatable::ArchiveSpacePerThread();
203}
204
205
// Sentinel nodes anchoring the two circular doubly-linked lists of
// ThreadState objects: recyclable slots and currently archived threads.
// Allocated once and intentionally never freed.
ThreadState* ThreadState::free_anchor_ = new ThreadState();
ThreadState* ThreadState::in_use_anchor_ = new ThreadState();
208
209
210ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
211 terminate_on_restore_(false),
212 next_(this), previous_(this) {
213}
214
215
216void ThreadState::AllocateSpace() {
217 data_ = NewArray<char>(ArchiveSpacePerThread());
218}
219
220
221void ThreadState::Unlink() {
222 next_->previous_ = previous_;
223 previous_->next_ = next_;
224}
225
226
227void ThreadState::LinkInto(List list) {
228 ThreadState* flying_anchor =
229 list == FREE_LIST ? free_anchor_ : in_use_anchor_;
230 next_ = flying_anchor->next_;
231 previous_ = flying_anchor;
232 flying_anchor->next_ = this;
233 next_->previous_ = this;
234}
235
236
237ThreadState* ThreadState::GetFree() {
238 ThreadState* gotten = free_anchor_->next_;
239 if (gotten == free_anchor_) {
240 ThreadState* new_thread_state = new ThreadState();
241 new_thread_state->AllocateSpace();
242 return new_thread_state;
243 }
244 return gotten;
245}
246
247
248// Gets the first in the list of archived threads.
249ThreadState* ThreadState::FirstInUse() {
250 return in_use_anchor_->Next();
251}
252
253
254ThreadState* ThreadState::Next() {
255 if (next_ == in_use_anchor_) return NULL;
256 return next_;
257}
258
259
// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
int ThreadManager::last_id_ = 0;
// The big V8 lock and the handle of the thread currently owning it.
Mutex* ThreadManager::mutex_ = OS::CreateMutex();
ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
// The (at most one) thread whose archive copy has been deferred, plus the
// state slot reserved for it.  See ArchiveThread()/EagerlyArchiveThread().
ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;
268
269
270void ThreadManager::ArchiveThread() {
271 ASSERT(!lazily_archived_thread_.IsValid());
272 ASSERT(!IsArchived());
273 ThreadState* state = ThreadState::GetFree();
274 state->Unlink();
275 Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
276 lazily_archived_thread_.Initialize(ThreadHandle::SELF);
277 lazily_archived_thread_state_ = state;
278 ASSERT(state->id() == kInvalidId);
279 state->set_id(CurrentId());
280 ASSERT(state->id() != kInvalidId);
281}
282
283
284void ThreadManager::EagerlyArchiveThread() {
285 ThreadState* state = lazily_archived_thread_state_;
286 state->LinkInto(ThreadState::IN_USE_LIST);
287 char* to = state->data();
288 // Ensure that data containing GC roots are archived first, and handle them
289 // in ThreadManager::Iterate(ObjectVisitor*).
290 to = HandleScopeImplementer::ArchiveThread(to);
291 to = Top::ArchiveThread(to);
292 to = Relocatable::ArchiveState(to);
293#ifdef ENABLE_DEBUGGER_SUPPORT
294 to = Debug::ArchiveDebug(to);
295#endif
296 to = StackGuard::ArchiveStackGuard(to);
297 to = RegExpStack::ArchiveStack(to);
298 to = Bootstrapper::ArchiveState(to);
299 lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
300 lazily_archived_thread_state_ = NULL;
301}
302
303
304void ThreadManager::FreeThreadResources() {
305 HandleScopeImplementer::FreeThreadResources();
306 Top::FreeThreadResources();
307#ifdef ENABLE_DEBUGGER_SUPPORT
308 Debug::FreeThreadResources();
309#endif
310 StackGuard::FreeThreadResources();
311 RegExpStack::FreeThreadResources();
312 Bootstrapper::FreeThreadResources();
313}
314
315
316bool ThreadManager::IsArchived() {
317 return Thread::HasThreadLocal(thread_state_key);
318}
319
320
321void ThreadManager::Iterate(ObjectVisitor* v) {
322 // Expecting no threads during serialization/deserialization
323 for (ThreadState* state = ThreadState::FirstInUse();
324 state != NULL;
325 state = state->Next()) {
326 char* data = state->data();
327 data = HandleScopeImplementer::Iterate(v, data);
328 data = Top::Iterate(v, data);
329 data = Relocatable::Iterate(v, data);
330 }
331}
332
333
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100334void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
Steve Block6ded16b2010-05-10 14:33:55 +0100335 for (ThreadState* state = ThreadState::FirstInUse();
336 state != NULL;
337 state = state->Next()) {
338 char* data = state->data();
339 data += HandleScopeImplementer::ArchiveSpacePerThread();
340 Top::IterateThread(v, data);
341 }
342}
343
344
Steve Blocka7e24c12009-10-30 11:49:00 +0000345void ThreadManager::MarkCompactPrologue(bool is_compacting) {
346 for (ThreadState* state = ThreadState::FirstInUse();
347 state != NULL;
348 state = state->Next()) {
349 char* data = state->data();
350 data += HandleScopeImplementer::ArchiveSpacePerThread();
351 Top::MarkCompactPrologue(is_compacting, data);
352 }
353}
354
355
356void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
357 for (ThreadState* state = ThreadState::FirstInUse();
358 state != NULL;
359 state = state->Next()) {
360 char* data = state->data();
361 data += HandleScopeImplementer::ArchiveSpacePerThread();
362 Top::MarkCompactEpilogue(is_compacting, data);
363 }
364}
365
366
367int ThreadManager::CurrentId() {
368 return Thread::GetThreadLocalInt(thread_id_key);
369}
370
371
372void ThreadManager::AssignId() {
373 if (!HasId()) {
374 ASSERT(Locker::IsLocked());
375 int thread_id = ++last_id_;
376 ASSERT(thread_id > 0); // see the comment near last_id_ definition.
377 Thread::SetThreadLocalInt(thread_id_key, thread_id);
378 Top::set_thread_id(thread_id);
379 }
380}
381
382
383bool ThreadManager::HasId() {
384 return Thread::HasThreadLocal(thread_id_key);
385}
386
387
388void ThreadManager::TerminateExecution(int thread_id) {
389 for (ThreadState* state = ThreadState::FirstInUse();
390 state != NULL;
391 state = state->Next()) {
392 if (thread_id == state->id()) {
393 state->set_terminate_on_restore(true);
394 }
395 }
396}
397
398
// This is the ContextSwitcher singleton. There is at most a single thread
// running which delivers preemption events to V8 threads.  NULL while no
// preemption is active; managed by StartPreemption()/StopPreemption().
ContextSwitcher* ContextSwitcher::singleton_ = NULL;
402
403
404ContextSwitcher::ContextSwitcher(int every_n_ms)
405 : keep_going_(true),
406 sleep_ms_(every_n_ms) {
407}
408
409
410// Set the scheduling interval of V8 threads. This function starts the
411// ContextSwitcher thread if needed.
412void ContextSwitcher::StartPreemption(int every_n_ms) {
413 ASSERT(Locker::IsLocked());
414 if (singleton_ == NULL) {
415 // If the ContextSwitcher thread is not running at the moment start it now.
416 singleton_ = new ContextSwitcher(every_n_ms);
417 singleton_->Start();
418 } else {
419 // ContextSwitcher thread is already running, so we just change the
420 // scheduling interval.
421 singleton_->sleep_ms_ = every_n_ms;
422 }
423}
424
425
426// Disable preemption of V8 threads. If multiple threads want to use V8 they
427// must cooperatively schedule amongst them from this point on.
428void ContextSwitcher::StopPreemption() {
429 ASSERT(Locker::IsLocked());
430 if (singleton_ != NULL) {
431 // The ContextSwitcher thread is running. We need to stop it and release
432 // its resources.
433 singleton_->keep_going_ = false;
434 singleton_->Join(); // Wait for the ContextSwitcher thread to exit.
435 // Thread has exited, now we can delete it.
436 delete(singleton_);
437 singleton_ = NULL;
438 }
439}
440
441
442// Main loop of the ContextSwitcher thread: Preempt the currently running V8
443// thread at regular intervals.
444void ContextSwitcher::Run() {
445 while (keep_going_) {
446 OS::Sleep(sleep_ms_);
447 StackGuard::Preempt();
448 }
449}
450
451
452// Acknowledge the preemption by the receiving thread.
453void ContextSwitcher::PreemptionReceived() {
454 ASSERT(Locker::IsLocked());
455 // There is currently no accounting being done for this. But could be in the
456 // future, which is why we leave this in.
457}
458
459
460} // namespace internal
461} // namespace v8