/*--------------------------------------------------------------------*/
/*--- The scheduler.                          pub_core_scheduler.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#ifndef __PUB_CORE_SCHEDULER_H
#define __PUB_CORE_SCHEDULER_H

//--------------------------------------------------------------------
// PURPOSE: This module is the scheduler, which is the main loop
// controlling the running of all the program's threads.
// It's at the centre of everything.
//--------------------------------------------------------------------

/*
   Thread state machine:

   Empty -> Init -> Runnable <=> WaitSys/Yielding
     ^                 |
     \---- Zombie -----/
 */
typedef
   enum ThreadStatus {
      VgTs_Empty,      /* this slot is not in use */
      VgTs_Init,       /* just allocated */
      VgTs_Runnable,   /* ready to run */
      VgTs_WaitSys,    /* waiting for a syscall to complete */
      VgTs_Yielding,   /* temporarily yielding the CPU */
      VgTs_Zombie,     /* transient state just before exiting */
   }
   ThreadStatus;
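
/* Illustrative sketch (not part of this header's API): going by the
   enum above, a slot is unused exactly when its status is VgTs_Empty,
   so an "is this slot occupied?" check could look like the
   hypothetical helper below.

      static inline Bool slot_in_use ( ThreadStatus st )
      {
         return st != VgTs_Empty;
      }
*/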

/* Return codes from the scheduler. */
typedef
   enum {
      VgSrc_None,         /* not exiting yet */
      VgSrc_ExitSyscall,  /* client called exit().  This is the normal
                             route out. */
      VgSrc_FatalSig      /* Killed by the default action of a fatal
                             signal */
   }
   VgSchedReturnCode;


#if defined(VGA_x86)
   typedef VexGuestX86State   VexGuestArchState;
#elif defined(VGA_amd64)
   typedef VexGuestAMD64State VexGuestArchState;
#elif defined(VGA_arm)
   typedef VexGuestARMState   VexGuestArchState;
#else
#  error Unknown architecture
#endif


typedef
   struct {
      /* --- BEGIN vex-mandated guest state --- */

      /* Saved machine context. */
      VexGuestArchState vex;

      /* Saved shadow context. */
      VexGuestArchState vex_shadow;

      /* Spill area. */
      UChar vex_spill[LibVEX_N_SPILL_BYTES];

      /* --- END vex-mandated guest state --- */
   }
   ThreadArchState;

typedef struct {
   /* ThreadId == 0 (and hence vg_threads[0]) is NEVER USED.
      The thread identity is simply the index in vg_threads[].
      ThreadId == 1 is the root thread and has the special property
      that we don't try and allocate or deallocate its stack.  For
      convenience of generating error messages, we also put the
      ThreadId in this tid field, but be aware that it should
      ALWAYS == the index in vg_threads[]. */
   ThreadId tid;

   /* Current scheduling status. */
   ThreadStatus status;

   /* This is set if the thread is in the process of exiting for any
      reason.  The precise details of the exit are in the OS-specific
      state. */
   VgSchedReturnCode exitreason;

   /* Architecture-specific thread state. */
   ThreadArchState arch;

   /* This thread's blocked-signals mask.  The semantics are that for
      a signal to be delivered to this thread, the signal must not be
      blocked by this signal mask.  If more than one thread accepts a
      signal, then it will be delivered to one at random.  If all
      threads block the signal, it will remain pending until either a
      thread unblocks it or someone uses sigwait/sigtimedwait. */
   vki_sigset_t sig_mask;

   /* tmp_sig_mask is usually the same as sig_mask, and is kept in
      sync whenever sig_mask is changed.  The only time they have
      different values is during the execution of a sigsuspend, where
      tmp_sig_mask is the temporary mask which sigsuspend installs.
      It is only consulted to compute the signal mask applied to a
      signal handler. */
   vki_sigset_t tmp_sig_mask;

   /* A little signal queue for signals we can't get the kernel to
      queue for us.  This is only allocated as needed, since it should
      be rare. */
   struct SigQueue *sig_queue;

   /* Syscall the thread is currently running; -1 if none.  Should
      only be set while the thread is in VgTs_WaitSys. */
   Int syscallno;

   /* Client stacks.  When a thread slot is freed, we don't deallocate
      its stack; we just leave it lying around for the next use of the
      slot.  If the next use of the slot requires a larger stack,
      only then is the old one deallocated and a new one
      allocated.

      For the main thread (threadid == 1), this mechanism doesn't
      apply.  We don't know the size of the stack since we didn't
      allocate it, and furthermore we never reallocate it. */

   /* The allocated size of this thread's stack (permanently zero
      if this is ThreadId == 1, since we didn't allocate its stack) */
   SizeT client_stack_szB;

   /* Address of the highest legitimate word in this stack.  This is
      used for error messages only -- not critical for execution
      correctness.  It is set for all stacks, specifically including
      ThreadId == 1 (the main thread). */
   Addr client_stack_highest_word;

   /* Alternate signal stack */
   vki_stack_t altstack;

   /* OS-specific thread state */
   os_thread_t os_state;

   /* Used in the syscall handlers.  Set to True to indicate that the
      PRE routine for a syscall has set the syscall result already and
      so the syscall does not need to be handed to the kernel. */
   Bool syscall_result_set;

   /* Per-thread jmp_buf to resume scheduler after a signal */
   Bool    sched_jmpbuf_valid;
   jmp_buf sched_jmpbuf;
}
ThreadState;
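
/* Example (a sketch, not part of this header): the two stack fields
   above are enough for a rough "is this address on the thread's
   stack?" test when generating error messages.  The helper name is
   hypothetical, and taking client_stack_highest_word minus
   client_stack_szB as the lower bound is an assumption about the
   stack layout.

      static inline Bool addr_maybe_on_stack ( ThreadState* tst, Addr a )
      {
         Addr hi = tst->client_stack_highest_word;
         Addr lo = hi - tst->client_stack_szB;   // assumed lower bound
         return lo <= a && a <= hi;
      }
*/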


/* The thread table. */
extern ThreadState VG_(threads)[VG_N_THREADS];
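
/* Usage sketch (an assumption, not a quote from the implementation):
   code that needs to visit every thread just indexes VG_(threads) from
   1 to VG_N_THREADS-1, since slot 0 is never used (see the tid comment
   in ThreadState).  For example, counting occupied slots:

      Int n = 0;
      ThreadId t;
      for (t = 1; t < VG_N_THREADS; t++)
         if (VG_(threads)[t].status != VgTs_Empty)
            n++;
*/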

/* Allocate a new ThreadState */
extern ThreadId VG_(alloc_ThreadState)(void);

/* A thread exits.  tid must currently be running. */
extern void VG_(exit_thread)(ThreadId tid);

/* Kill a thread.  This interrupts whatever a thread is doing, and
   makes it exit ASAP.  This does not set the exitreason or
   exitcode. */
extern void VG_(kill_thread)(ThreadId tid);

/* Check that tid is in range and denotes a non-Empty thread. */
extern Bool VG_(is_valid_tid) ( ThreadId tid );

/* Get the ThreadState for a particular thread */
extern ThreadState *VG_(get_ThreadState)(ThreadId tid);
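
/* Typical usage (sketch): callers are assumed to validate a tid before
   dereferencing its state; vg_assert is taken to be the core's usual
   assertion macro.

      vg_assert( VG_(is_valid_tid)(tid) );
      ThreadState* tst = VG_(get_ThreadState)(tid);
      if (tst->status == VgTs_WaitSys) {
         // the thread is blocked in the kernel
      }
*/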

/* Given an LWP id (ie, real kernel thread id), find the corresponding
   ThreadId */
extern ThreadId VG_(get_lwp_tid)(Int lwpid);

/* Returns true if a thread is currently running (ie, has the CPU lock) */
extern Bool VG_(is_running_thread)(ThreadId tid);

/* Returns true if the thread is in the process of exiting */
extern Bool VG_(is_exiting)(ThreadId tid);

/* Return the number of non-dead Threads */
extern Int VG_(count_living_threads)(void);

/* Nuke all threads except tid. */
extern void VG_(nuke_all_threads_except) ( ThreadId me,
                                           VgSchedReturnCode reason );

/* Make a thread the running thread.  The thread must previously have
   been sleeping, and not holding the CPU semaphore.  This will set the
   thread state to VgTs_Runnable, and the thread will attempt to take
   the CPU semaphore.  By the time it returns, tid will be the running
   thread. */
extern void VG_(set_running) ( ThreadId tid );

/* Set a thread into a sleeping state.  Before the call, the thread
   must be runnable, and holding the CPU semaphore.  When this call
   returns, the thread will be set to the specified sleeping state,
   and will not be holding the CPU semaphore.  Note that another
   thread could be running by the time this call returns, so the
   caller must be careful not to touch any shared state.  It is also
   the caller's responsibility to actually block until the thread is
   ready to run again. */
extern void VG_(set_sleeping) ( ThreadId tid, ThreadStatus state );
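
/* Pairing sketch (an assumption drawn from the comments above, not a
   quote from the implementation): a thread about to block in the
   kernel gives up the CPU semaphore first and reacquires it once the
   kernel returns, roughly:

      VG_(set_sleeping)(tid, VgTs_WaitSys);   // drop the CPU semaphore
      // ... perform the blocking syscall ...
      VG_(set_running)(tid);                  // reacquire it
*/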

/* Yield the CPU for a while */
extern void VG_(vg_yield)(void);

// The scheduler.
extern VgSchedReturnCode VG_(scheduler) ( ThreadId tid );

// Do everything which needs doing before the process finally ends,
// like printing reports, etc.
extern void VG_(shutdown_actions_NORETURN) (
               ThreadId tid,
               VgSchedReturnCode tids_schedretcode
            );

extern void VG_(scheduler_init) ( void );
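
/* Start-to-finish sketch (assumed from the declarations in this header,
   not taken from the implementation): initialise the scheduler, run the
   root thread's scheduling loop, then carry out the final shutdown
   actions with whatever return code the loop produced.

      VG_(scheduler_init)();
      VgSchedReturnCode src = VG_(scheduler)( 1 );   // 1 == root ThreadId
      VG_(shutdown_actions_NORETURN)( 1, src );
*/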

extern void VG_(pp_sched_status) ( void );

// Longjmp back to the scheduler and thus enter the sighandler immediately.
extern void VG_(resume_scheduler) ( ThreadId tid );
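
/* Signal-resume sketch (an assumption; the real logic lives in the
   scheduler and signal code, not in this header): sched_jmpbuf in
   ThreadState is presumably armed with setjmp before the client runs,
   so that VG_(resume_scheduler) can longjmp straight back to the
   scheduler:

      if (setjmp(tst->sched_jmpbuf) == 0) {
         tst->sched_jmpbuf_valid = True;
         // ... run the client ...
      }
      // control also arrives here via VG_(resume_scheduler)(tid)
*/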

/* If true, a fault is Valgrind-internal (ie, a bug) */
extern Bool VG_(my_fault);

#endif   // __PUB_CORE_SCHEDULER_H

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/