#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>

struct task_struct;
struct rusage;
union thread_union;

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

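/*
 * Illustrative only (not part of the original header): a typical reader of
 * the task list takes tasklist_lock for reading around the walk.  The
 * for_each_process() helper comes from the sched headers; the pr_info()
 * body is just a placeholder.
 *
 *	struct task_struct *p;
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process(p)
 *		pr_info("pid %d comm %s\n", task_pid_nr(p), p->comm);
 *	read_unlock(&tasklist_lock);
 */
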
extern union thread_union init_thread_union;
extern struct task_struct init_task;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);

extern void proc_caches_init(void);

extern void release_task(struct task_struct * p);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int *, int, struct rusage *);
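/*
 * Illustrative only: kernel_thread() is the low-level primitive behind the
 * kthread machinery; new code normally uses kthread_run() instead.  A rough
 * sketch of a direct call (my_worker and the flag combination are made up
 * for the example):
 *
 *	static int my_worker(void *arg)
 *	{
 *		// ... do the work, then return to exit the thread ...
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *	if (pid < 0)
 *		return pid;
 */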

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
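/*
 * Illustrative only: the usual reference-counting pattern when a task
 * pointer has to stay valid beyond the context it was obtained from:
 *
 *	get_task_struct(tsk);
 *	// ... use tsk, possibly after it has exited ...
 *	put_task_struct(tsk);
 *
 * Each get_task_struct() must be balanced by exactly one put_task_struct();
 * the final put frees the task via __put_task_struct().
 */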

struct task_struct *task_rcu_dereference(struct task_struct **ptask);
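/*
 * Illustrative only: task_rcu_dereference() lets a reader follow a task
 * pointer that may be concurrently freed and reused.  A rough sketch, where
 * task_ptr stands for some shared "struct task_struct *" location:
 *
 *	rcu_read_lock();
 *	p = task_rcu_dereference(&task_ptr);
 *	if (p)
 *		// p cannot be freed until rcu_read_unlock()
 *	rcu_read_unlock();
 *
 * The result must not be used after rcu_read_unlock() unless a reference
 * was taken on it first.
 */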

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].  And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
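/*
 * Illustrative only: a typical task_lock() section takes a stable snapshot
 * of one of the fields listed in the comment above, e.g. ->comm:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(comm, p->comm, sizeof(comm));
 *	task_unlock(p);
 *
 * (get_task_comm() in <linux/sched.h> wraps essentially this sequence.)
 */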

#endif /* _LINUX_SCHED_TASK_H */