/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>


/* First some general settings */

/* microseconds (the Python API uses seconds, though) */
#define DEFAULT_INTERVAL 5000
static unsigned long gil_interval = DEFAULT_INTERVAL;
#define INTERVAL (gil_interval >= 1 ? gil_interval : 1)

/* Enable if you want to force the switching of threads at least every `gil_interval` */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING


/*
   Notes about the implementation:

   - The GIL is just a boolean variable (gil_locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond). gil_mutex is taken for short periods of time,
     and therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread. A volatile boolean
     variable (gil_drop_request) is used for that purpose, which is checked
     at every turn of the eval loop. That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used
       which ORs several conditions into one. Volatile booleans are
       sufficient as inter-thread signalling means since Python is run
       on cache-coherent architectures only.]

   - A thread wanting to take the GIL will first let pass a given amount of
     time (`interval` microseconds) before setting gil_drop_request. This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of gil_last_holder is changed to something other than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prevent the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is enabled with FORCE_SWITCHING above)
*/
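/* Illustrative sketch (not part of this file): roughly how the eval loop in
   ceval.c is expected to cooperate with the machinery below.  The loop shape
   is a simplified assumption for exposition only; the real dispatch loop
   lives in PyEval_EvalFrameEx(). */
#if 0
    for (;;) {
        if (_Py_atomic_load_relaxed(&eval_breaker)) {
            if (_Py_atomic_load_relaxed(&gil_drop_request)) {
                /* Another thread asked for the GIL: release it, let the
                   waiter be scheduled, then take it back and continue. */
                drop_gil(tstate);
                take_gil(tstate);
            }
        }
        /* ... fetch and execute the next opcode ... */
    }
#endif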

#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
   is not present in unistd.h. But they can still be implemented as an
   external library (e.g. gnu pth in pthread emulation). */
# ifdef HAVE_PTHREAD_H
#  include <pthread.h> /* _POSIX_THREADS */
# endif
#endif


#ifdef _POSIX_THREADS

/*
 * POSIX support
 */

#include <pthread.h>

#define ADD_MICROSECONDS(tv, interval) \
do { \
    tv.tv_usec += (long) interval; \
    tv.tv_sec += tv.tv_usec / 1000000; \
    tv.tv_usec %= 1000000; \
} while (0)
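/* Worked example (illustration only): starting from tv = {10 s, 999000 us}
   with interval = 5000, tv_usec first becomes 1004000; the carry then folds
   into tv_sec, leaving tv = {11 s, 4000 us}. */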

/* We assume all modern POSIX systems have gettimeofday() */
#ifdef GETTIMEOFDAY_NO_TZ
#define GETTIMEOFDAY(ptv) gettimeofday(ptv)
#else
#define GETTIMEOFDAY(ptv) gettimeofday(ptv, (struct timezone *)NULL)
#endif

#define MUTEX_T pthread_mutex_t
#define MUTEX_INIT(mut) \
    if (pthread_mutex_init(&mut, NULL)) { \
        Py_FatalError("pthread_mutex_init(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (pthread_mutex_destroy(&mut)) { \
        Py_FatalError("pthread_mutex_destroy(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (pthread_mutex_lock(&mut)) { \
        Py_FatalError("pthread_mutex_lock(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (pthread_mutex_unlock(&mut)) { \
        Py_FatalError("pthread_mutex_unlock(" #mut ") failed"); };

#define COND_T pthread_cond_t
#define COND_INIT(cond) \
    if (pthread_cond_init(&cond, NULL)) { \
        Py_FatalError("pthread_cond_init(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (pthread_cond_destroy(&cond)) { \
        Py_FatalError("pthread_cond_destroy(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (pthread_cond_signal(&cond)) { \
        Py_FatalError("pthread_cond_signal(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (pthread_cond_wait(&cond, &mut)) { \
        Py_FatalError("pthread_cond_wait(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
{ \
    int r; \
    struct timespec ts; \
    struct timeval deadline; \
    \
    GETTIMEOFDAY(&deadline); \
    ADD_MICROSECONDS(deadline, microseconds); \
    ts.tv_sec = deadline.tv_sec; \
    ts.tv_nsec = deadline.tv_usec * 1000; \
    \
    r = pthread_cond_timedwait(&cond, &mut, &ts); \
    if (r == ETIMEDOUT) \
        timeout_result = 1; \
    else if (r) \
        Py_FatalError("pthread_cond_timedwait(" #cond ") failed"); \
    else \
        timeout_result = 0; \
}

#elif defined(NT_THREADS)

/*
 * Windows (2000 and later, as well as (hopefully) CE) support
 */

#include <windows.h>

#define MUTEX_T CRITICAL_SECTION
#define MUTEX_INIT(mut) do { \
    if (!(InitializeCriticalSectionAndSpinCount(&(mut), 4000))) \
        Py_FatalError("InitializeCriticalSectionAndSpinCount(" #mut ") failed"); \
} while (0)
#define MUTEX_FINI(mut) \
    DeleteCriticalSection(&(mut))
#define MUTEX_LOCK(mut) \
    EnterCriticalSection(&(mut))
#define MUTEX_UNLOCK(mut) \
    LeaveCriticalSection(&(mut))

/* We emulate condition variables with a semaphore.
   We use a semaphore rather than an auto-reset event, because although
   an auto-reset event might appear to solve the lost-wakeup bug (the race
   condition between releasing the outer lock and waiting), since it
   maintains state even though a wait hasn't happened yet, there is still
   a lost-wakeup problem if more than one thread is interrupted in the
   critical place. A semaphore solves that.
   Because it is ok to signal a condition variable with no one
   waiting, we need to keep track of the number of
   waiting threads. Otherwise, the semaphore's state could rise
   without bound.

   Generic emulations of the pthread_cond_* API using
   Win32 functions can be found on the Web.
   The following read can be edifying (or not):
   http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
*/
typedef struct COND_T
{
    HANDLE sem;    /* the semaphore */
    int n_waiting; /* how many waiters are unreleased */
} COND_T;

__inline static void _cond_init(COND_T *cond)
{
    /* A semaphore with a large max value. The positive value
     * is only needed to catch those "lost wakeup" events and
     * race conditions when a timed wait elapses.
     */
    if (!(cond->sem = CreateSemaphore(NULL, 0, 1000, NULL)))
        Py_FatalError("CreateSemaphore() failed");
    cond->n_waiting = 0;
}

__inline static void _cond_fini(COND_T *cond)
{
    BOOL ok = CloseHandle(cond->sem);
    if (!ok)
        Py_FatalError("CloseHandle() failed");
}

__inline static void _cond_wait(COND_T *cond, MUTEX_T *mut)
{
    ++cond->n_waiting;
    MUTEX_UNLOCK(*mut);
    /* The "lost wakeup bug" would occur if the caller were interrupted here,
     * but we are safe because we are using a semaphore, which has an
     * internal count.
     */
    if (WaitForSingleObject(cond->sem, INFINITE) == WAIT_FAILED)
        Py_FatalError("WaitForSingleObject() failed");
    MUTEX_LOCK(*mut);
}

__inline static int _cond_timed_wait(COND_T *cond, MUTEX_T *mut,
                                     int us)
{
    DWORD r;
    ++cond->n_waiting;
    MUTEX_UNLOCK(*mut);
    r = WaitForSingleObject(cond->sem, us / 1000);
    if (r == WAIT_FAILED)
        Py_FatalError("WaitForSingleObject() failed");
    MUTEX_LOCK(*mut);
    if (r == WAIT_TIMEOUT)
        --cond->n_waiting;
    /* Here we have a benign race condition with _cond_signal. If the
     * wait operation has timed out, but before we can acquire the
     * mutex again to decrement n_waiting, a thread holding the mutex
     * still sees a positive n_waiting value and may call
     * ReleaseSemaphore and decrement n_waiting.
     * This will cause n_waiting to be decremented twice.
     * This is benign, though, because ReleaseSemaphore will also have
     * been called, leaving the semaphore state positive. We may
     * thus end up with the semaphore in state 1 and n_waiting == -1, and
     * the next time someone calls _cond_wait(), that thread will
     * pass right through, decrementing the semaphore state and
     * incrementing n_waiting, thus correcting the extra _cond_signal.
     */
    return r == WAIT_TIMEOUT;
}

__inline static void _cond_signal(COND_T *cond) {
    /* NOTE: This must be called with the mutex held */
    if (cond->n_waiting > 0) {
        if (!ReleaseSemaphore(cond->sem, 1, NULL))
            Py_FatalError("ReleaseSemaphore() failed");
        --cond->n_waiting;
    }
}

#define COND_INIT(cond) \
    _cond_init(&(cond))
#define COND_FINI(cond) \
    _cond_fini(&(cond))
#define COND_SIGNAL(cond) \
    _cond_signal(&(cond))
#define COND_WAIT(cond, mut) \
    _cond_wait(&(cond), &(mut))
#define COND_TIMED_WAIT(cond, mut, us, timeout_result) do { \
    (timeout_result) = _cond_timed_wait(&(cond), &(mut), us); \
} while (0)

#else

#error You need either a POSIX-compatible or a Windows system!

#endif /* _POSIX_THREADS, NT_THREADS */


/* Whether the GIL is already taken (-1 if uninitialized). This is atomic
   because it can be read without any lock taken in ceval.c. */
static _Py_atomic_int gil_locked = {-1};
/* Number of GIL switches since the beginning. */
static unsigned long gil_switch_number = 0;
/* Last PyThreadState holding / having held the GIL. This helps us know
   whether anyone else was scheduled after we dropped the GIL. */
static _Py_atomic_address gil_last_holder = {NULL};

/* This condition variable allows one or several threads to wait until
   the GIL is released. In addition, the mutex also protects the above
   variables. */
static COND_T gil_cond;
static MUTEX_T gil_mutex;

#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
   a GIL-awaiting thread to be scheduled and take the GIL. */
static COND_T switch_cond;
static MUTEX_T switch_mutex;
#endif


static int gil_created(void)
{
    return _Py_atomic_load_explicit(&gil_locked, _Py_memory_order_acquire) >= 0;
}

static void create_gil(void)
{
    MUTEX_INIT(gil_mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(switch_mutex);
#endif
    COND_INIT(gil_cond);
#ifdef FORCE_SWITCHING
    COND_INIT(switch_cond);
#endif
    _Py_atomic_store_relaxed(&gil_last_holder, NULL);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil_locked);
    _Py_atomic_store_explicit(&gil_locked, 0, _Py_memory_order_release);
}

static void destroy_gil(void)
{
    MUTEX_FINI(gil_mutex);
#ifdef FORCE_SWITCHING
    MUTEX_FINI(switch_mutex);
#endif
    COND_FINI(gil_cond);
#ifdef FORCE_SWITCHING
    COND_FINI(switch_cond);
#endif
    _Py_atomic_store_explicit(&gil_locked, -1, _Py_memory_order_release);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil_locked);
}

static void recreate_gil(void)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil_locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil();
}

static void drop_gil(PyThreadState *tstate)
{
    if (!_Py_atomic_load_relaxed(&gil_locked))
        Py_FatalError("drop_gil: GIL is not locked");
    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_relaxed(&gil_last_holder, tstate);
    }

    MUTEX_LOCK(gil_mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil_locked, /*is_write=*/1);
    _Py_atomic_store_relaxed(&gil_locked, 0);
    COND_SIGNAL(gil_cond);
    MUTEX_UNLOCK(gil_mutex);

#ifdef FORCE_SWITCHING
    if (_Py_atomic_load_relaxed(&gil_drop_request) && tstate != NULL) {
        MUTEX_LOCK(switch_mutex);
        /* Not switched yet => wait */
        if (_Py_atomic_load_relaxed(&gil_last_holder) == tstate) {
            RESET_GIL_DROP_REQUEST();
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(switch_cond, switch_mutex);
        }
        MUTEX_UNLOCK(switch_mutex);
    }
#endif
}

static void take_gil(PyThreadState *tstate)
{
    int err;
    if (tstate == NULL)
        Py_FatalError("take_gil: NULL tstate");

    err = errno;
    MUTEX_LOCK(gil_mutex);

    if (!_Py_atomic_load_relaxed(&gil_locked))
        goto _ready;

    while (_Py_atomic_load_relaxed(&gil_locked)) {
        int timed_out = 0;
        unsigned long saved_switchnum;

        saved_switchnum = gil_switch_number;
        COND_TIMED_WAIT(gil_cond, gil_mutex, INTERVAL, timed_out);
        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_relaxed(&gil_locked) &&
            gil_switch_number == saved_switchnum) {
            SET_GIL_DROP_REQUEST();
        }
    }
_ready:
#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil_last_holder (see drop_gil()). */
    MUTEX_LOCK(switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_relaxed(&gil_locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil_locked, /*is_write=*/1);

    if (tstate != _Py_atomic_load_relaxed(&gil_last_holder)) {
        _Py_atomic_store_relaxed(&gil_last_holder, tstate);
        ++gil_switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(switch_cond);
    MUTEX_UNLOCK(switch_mutex);
#endif
    if (_Py_atomic_load_relaxed(&gil_drop_request)) {
        RESET_GIL_DROP_REQUEST();
    }
    if (tstate->async_exc != NULL) {
        _PyEval_SignalAsyncExc();
    }

    MUTEX_UNLOCK(gil_mutex);
    errno = err;
}

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    gil_interval = microseconds;
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    return gil_interval;
}
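
/* Illustrative sketch (assumption, not part of this file): how code outside
   the interpreter core typically releases and re-acquires the GIL around a
   blocking call.  Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS expand to
   PyEval_SaveThread() / PyEval_RestoreThread(), which funnel into drop_gil()
   and take_gil() above; do_blocking_io() is a hypothetical helper. */
#if 0
static PyObject *
example_blocking_call(PyObject *self, PyObject *args)
{
    long result;
    Py_BEGIN_ALLOW_THREADS          /* drops the GIL via drop_gil() */
    result = do_blocking_io();      /* hypothetical blocking operation */
    Py_END_ALLOW_THREADS            /* re-acquires the GIL via take_gil() */
    return PyLong_FromLong(result);
}
#endif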