/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/kaiser.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/cpufreq_times.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads: the futex ABI stores a thread ID in the
 * low 30 bits of a futex word, so TIDs above FUTEX_TID_MASK cannot be
 * represented there.
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
#endif

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	local_irq_disable();
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s = this_cpu_read(cached_stacks[i]);

		if (!s)
			continue;
		this_cpu_write(cached_stacks[i], NULL);

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		local_irq_enable();
		return s->addr;
	}
	local_irq_enable();

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP | __GFP_HIGHMEM,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}

static inline void free_thread_stack(struct task_struct *tsk)
{
	kaiser_unmap_thread_stack(tsk->stack);
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		unsigned long flags;
		int i;

		local_irq_save(flags);
		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_read(cached_stacks[i]))
				continue;

			this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
			local_irq_restore(flags);
			return;
		}
		local_irq_restore(flags);

		vfree(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
					    account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
					    account * (THREAD_SIZE / 1024));
	}
}
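
/*
 * Illustration (assuming 16 KiB stacks and 4 KiB pages): the counters
 * above are kept in KiB, so account_kernel_stack(tsk, 1) adds 4 to
 * NR_KERNEL_STACK_KB for each of the four stack pages in the vmap case,
 * or 16 in one go in the non-vmap case; passing -1 undoes the same amount.
 */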

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
	cpufreq_task_times_exit(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads - compute and set the default limit on the number
 * of threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
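
/*
 * Worked example of the limit above (illustrative numbers): with 4 GiB
 * of RAM, 4 KiB pages and 16 KiB thread stacks,
 *
 *	threads = (2^20 pages * 2^12 bytes) / (2^14 bytes * 8)
 *	        = 2^32 / 2^17 = 32768,
 *
 * i.e. thread stacks may consume at most 1/8 of memory, before the
 * result is clamped to [MIN_THREADS, MAX_THREADS].
 */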

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep = kmem_cache_create("task_struct",
			arch_task_struct_size, ARCH_MIN_TASKALIGN,
			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;

	err = kaiser_map_thread_stack(tsk->stack);
	if (err)
		goto free_stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_long();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_VMA(tmp);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &=
			~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
		tmp->vm_next = tmp->vm_prev = NULL;
		tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
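
/*
 * Usage sketch (assuming the standard meaning of the low filter bits,
 * see Documentation/filesystems/proc.txt): booting with
 * "coredump_filter=0x7" makes new mm's default to dumping anonymous
 * private (bit 0), anonymous shared (bit 1) and file-backed private
 * (bit 2) mappings, and a process may still override the inherited
 * value via /proc/<pid>/coredump_filter.
 */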

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	rwlock_init(&mm->mm_rb_lock);
#endif
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm_nr_pmds_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
				atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
				mm_nr_pmds(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput.  Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 * Returns 1 if this drop freed the mm, 0 otherwise.
 */
int mmput(struct mm_struct *mm)
{
	int mm_freed = 0;

	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		__mmput(mm);
		mm_freed = 1;
	}

	return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive is left, in execve the task is
 * single-threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks that PF_KTHREAD (meaning
 * this kernel worker thread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set, and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
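
/*
 * Typical usage, e.g. from a /proc or ptrace path: take a reference and
 * pair it with mmput() when done.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... inspect the address space ...
 *		mmput(mm);
 *	}
 */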

struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
1205 oldmm = current->mm;
1206 if (!oldmm)
1207 return 0;
1208
Davidlohr Bueso615d6e82014-04-07 15:37:25 -07001209 /* initialize the new vmacache entries */
1210 vmacache_flush(tsk);
1211
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 if (clone_flags & CLONE_VM) {
1213 atomic_inc(&oldmm->mm_users);
1214 mm = oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 goto good_mm;
1216 }
1217
1218 retval = -ENOMEM;
JANAK DESAIa0a7ec32006-02-07 12:59:01 -08001219 mm = dup_mm(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 if (!mm)
1221 goto fail_nomem;
1222
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223good_mm:
1224 tsk->mm = mm;
1225 tsk->active_mm = mm;
1226 return 0;
1227
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228fail_nomem:
1229 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230}
1231
Alexey Dobriyana39bc512007-10-18 23:41:10 -07001232static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
Al Viro498052b2009-03-30 07:20:30 -04001234 struct fs_struct *fs = current->fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 if (clone_flags & CLONE_FS) {
Al Viro498052b2009-03-30 07:20:30 -04001236 /* tsk->fs is already what we want */
Nick Piggin2a4419b2010-08-18 04:37:33 +10001237 spin_lock(&fs->lock);
Al Viro498052b2009-03-30 07:20:30 -04001238 if (fs->in_exec) {
Nick Piggin2a4419b2010-08-18 04:37:33 +10001239 spin_unlock(&fs->lock);
Al Viro498052b2009-03-30 07:20:30 -04001240 return -EAGAIN;
1241 }
1242 fs->users++;
Nick Piggin2a4419b2010-08-18 04:37:33 +10001243 spin_unlock(&fs->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 return 0;
1245 }
Al Viro498052b2009-03-30 07:20:30 -04001246 tsk->fs = copy_fs_struct(fs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 if (!tsk->fs)
1248 return -ENOMEM;
1249 return 0;
1250}
1251
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001252static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
JANAK DESAIa016f332006-02-07 12:59:02 -08001253{
1254 struct files_struct *oldf, *newf;
1255 int error = 0;
1256
1257 /*
1258 * A background process may not have any files ...
1259 */
1260 oldf = current->files;
1261 if (!oldf)
1262 goto out;
1263
1264 if (clone_flags & CLONE_FILES) {
1265 atomic_inc(&oldf->count);
1266 goto out;
1267 }
1268
JANAK DESAIa016f332006-02-07 12:59:02 -08001269 newf = dup_fd(oldf, &error);
1270 if (!newf)
1271 goto out;
1272
1273 tsk->files = newf;
1274 error = 0;
1275out:
1276 return error;
1277}
1278
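/*
 * Hedged example: with CLONE_FILES the descriptor table is shared, so
 * an open(2) in one thread yields an fd valid in all of them; without
 * it, dup_fd() above snapshots the table:
 *
 *	int fd = open("/etc/hostname", O_RDONLY);
 *	// any sibling thread may read(fd, ...); a fork()ed child works
 *	// on a copy of the table, so fds it opens afterwards never
 *	// appear in the parent.
 */
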
Jens Axboefadad8782008-01-24 08:54:47 +01001279static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
Jens Axboefd0928d2008-01-24 08:52:45 +01001280{
1281#ifdef CONFIG_BLOCK
1282 struct io_context *ioc = current->io_context;
Tejun Heo6e736be2011-12-14 00:33:38 +01001283 struct io_context *new_ioc;
Jens Axboefd0928d2008-01-24 08:52:45 +01001284
1285 if (!ioc)
1286 return 0;
Jens Axboefadad8782008-01-24 08:54:47 +01001287 /*
1288 * Share io context with parent, if CLONE_IO is set
1289 */
1290 if (clone_flags & CLONE_IO) {
Tejun Heo3d487492012-03-05 13:15:25 -08001291 ioc_task_link(ioc);
1292 tsk->io_context = ioc;
Jens Axboefadad8782008-01-24 08:54:47 +01001293 } else if (ioprio_valid(ioc->ioprio)) {
Tejun Heo6e736be2011-12-14 00:33:38 +01001294 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
1295 if (unlikely(!new_ioc))
Jens Axboefd0928d2008-01-24 08:52:45 +01001296 return -ENOMEM;
1297
Tejun Heo6e736be2011-12-14 00:33:38 +01001298 new_ioc->ioprio = ioc->ioprio;
Tejun Heo11a31222012-02-07 07:51:30 +01001299 put_io_context(new_ioc);
Jens Axboefd0928d2008-01-24 08:52:45 +01001300 }
1301#endif
1302 return 0;
1303}
1304
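/*
 * Hedged sketch: the ioprio copy above is why a child forked without
 * CLONE_IO still inherits its parent's I/O priority. ioprio_set(2) has
 * no glibc wrapper, so a raw syscall is shown:
 *
 *	syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio);
 *	if (fork() == 0)
 *		// syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0) == prio
 */
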
Alexey Dobriyana39bc512007-10-18 23:41:10 -07001305static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306{
1307 struct sighand_struct *sig;
1308
Zhaolei60348802009-01-06 14:40:46 -08001309 if (clone_flags & CLONE_SIGHAND) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 atomic_inc(&current->sighand->count);
1311 return 0;
1312 }
1313 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
Ingo Molnare56d0902006-01-08 01:01:37 -08001314 rcu_assign_pointer(tsk->sighand, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 if (!sig)
1316 return -ENOMEM;
Peter Zijlstra9d7fb042015-06-30 11:30:54 +02001317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 atomic_set(&sig->count, 1);
Jann Horn015fd7e2018-08-21 22:00:58 -07001319 spin_lock_irq(&current->sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 memcpy(sig->action, current->sighand->action, sizeof(sig->action));
Jann Horn015fd7e2018-08-21 22:00:58 -07001321 spin_unlock_irq(&current->sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 return 0;
1323}
1324
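/*
 * Hedged example: CLONE_SIGHAND (implied by CLONE_THREAD) shares the
 * handler table, which is why a sigaction() in any one thread takes
 * effect process-wide:
 *
 *	signal(SIGTERM, handler);	// installed once, seen by all threads
 *	// a fork()ed child instead gets the memcpy()ed snapshot above,
 *	// so later handler changes in the parent do not propagate.
 */
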
Oleg Nesterova7e53282006-03-28 16:11:27 -08001325void __cleanup_sighand(struct sighand_struct *sighand)
Oleg Nesterovc81addc2006-03-28 16:11:17 -08001326{
Oleg Nesterovd80e7312012-02-24 20:07:11 +01001327 if (atomic_dec_and_test(&sighand->count)) {
1328 signalfd_cleanup(sighand);
Oleg Nesterov392809b2014-09-28 23:44:18 +02001329 /*
1330 * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
1331 * without an RCU grace period, see __lock_task_sighand().
1332 */
Oleg Nesterovc81addc2006-03-28 16:11:17 -08001333 kmem_cache_free(sighand_cachep, sighand);
Oleg Nesterovd80e7312012-02-24 20:07:11 +01001334 }
Oleg Nesterovc81addc2006-03-28 16:11:17 -08001335}
1336
Frank Mayharf06febc2008-09-12 09:54:39 -07001337/*
1338 * Initialize POSIX timer handling for a thread group.
1339 */
1340static void posix_cpu_timers_init_group(struct signal_struct *sig)
1341{
Jiri Slaby78d7d402010-03-05 13:42:54 -08001342 unsigned long cpu_limit;
1343
Jason Low316c1608d2015-04-28 13:00:20 -07001344 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
Jiri Slaby78d7d402010-03-05 13:42:54 -08001345 if (cpu_limit != RLIM_INFINITY) {
1346 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
Jason Lowd5c373e2015-10-14 12:07:55 -07001347 sig->cputimer.running = true;
Oleg Nesterov6279a7512009-03-27 01:06:07 +01001348 }
1349
Frank Mayharf06febc2008-09-12 09:54:39 -07001350 /* The timer lists. */
1351 INIT_LIST_HEAD(&sig->cpu_timers[0]);
1352 INIT_LIST_HEAD(&sig->cpu_timers[1]);
1353 INIT_LIST_HEAD(&sig->cpu_timers[2]);
1354}
1355
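/*
 * Hedged example: the RLIMIT_CPU handling above arms the process-wide
 * CPU timer, so after
 *
 *	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * the thread group receives SIGXCPU once its combined CPU time passes
 * one second (and SIGKILL at the hard limit).
 */
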
Alexey Dobriyana39bc512007-10-18 23:41:10 -07001356static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357{
1358 struct signal_struct *sig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
Oleg Nesterov4ab6c082009-08-26 14:29:24 -07001360 if (clone_flags & CLONE_THREAD)
Peter Zijlstra490dea42008-11-24 17:06:57 +01001361 return 0;
Oleg Nesterov6279a7512009-03-27 01:06:07 +01001362
Veaceslav Falicoa56704e2010-03-10 15:23:01 -08001363 sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 tsk->signal = sig;
1365 if (!sig)
1366 return -ENOMEM;
1367
Oleg Nesterovb3ac0222010-05-26 14:43:24 -07001368 sig->nr_threads = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 atomic_set(&sig->live, 1);
Oleg Nesterovb3ac0222010-05-26 14:43:24 -07001370 atomic_set(&sig->sigcnt, 1);
Oleg Nesterov0c740d02014-01-21 15:49:56 -08001371
1372 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1373 sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1374 tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1375
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 init_waitqueue_head(&sig->wait_chldexit);
Oleg Nesterovdb51aec2008-04-30 00:52:52 -07001377 sig->curr_target = tsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 init_sigpending(&sig->shared_pending);
1379 INIT_LIST_HEAD(&sig->posix_timers);
Rik van Riele78c3492014-08-16 13:40:10 -04001380 seqlock_init(&sig->stats_lock);
Peter Zijlstra9d7fb042015-06-30 11:30:54 +02001381 prev_cputime_init(&sig->prev_cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001383 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 sig->real_timer.function = it_real_fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 task_lock(current->group_leader);
1387 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1388 task_unlock(current->group_leader);
1389
Oleg Nesterov6279a7512009-03-27 01:06:07 +01001390 posix_cpu_timers_init_group(sig);
1391
Miloslav Trmac522ed772007-07-15 23:40:56 -07001392 tty_audit_fork(sig);
Mike Galbraith5091faa2010-11-30 14:18:03 +01001393 sched_autogroup_fork(sig);
Miloslav Trmac522ed772007-07-15 23:40:56 -07001394
David Rientjesa63d83f2010-08-09 17:19:46 -07001395 sig->oom_score_adj = current->signal->oom_score_adj;
Mandeep Singh Bainesdabb16f2011-01-13 15:46:05 -08001396 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
KOSAKI Motohiro28b83c52009-09-21 17:03:13 -07001397
Lennart Poetteringebec18a2012-03-23 15:01:54 -07001398 sig->has_child_subreaper = current->signal->has_child_subreaper ||
1399 current->signal->is_child_subreaper;
1400
KOSAKI Motohiro9b1bf122010-10-27 15:34:08 -07001401 mutex_init(&sig->cred_guard_mutex);
1402
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 return 0;
1404}
1405
Kees Cookdbd952122014-06-27 15:18:48 -07001406static void copy_seccomp(struct task_struct *p)
1407{
1408#ifdef CONFIG_SECCOMP
1409 /*
1410 * Must be called with sighand->lock held, which is common to
1411 * all threads in the group. Holding cred_guard_mutex is not
1412 * needed because this new task is not yet running and cannot
1413 * be racing exec.
1414 */
Guenter Roeck69f6a342014-08-10 20:50:30 -07001415 assert_spin_locked(&current->sighand->siglock);
Kees Cookdbd952122014-06-27 15:18:48 -07001416
1417 /* Ref-count the new filter user, and assign it. */
1418 get_seccomp_filter(current);
1419 p->seccomp = current->seccomp;
1420
1421 /*
1422 * Explicitly enable no_new_privs here in case it got set
1423 * between the task_struct being duplicated and holding the
1424 * sighand lock. The seccomp state and nnp must be in sync.
1425 */
1426 if (task_no_new_privs(current))
1427 task_set_no_new_privs(p);
1428
1429 /*
1430 * If the parent gained a seccomp mode after copying thread
1431	 * flags but before we took the sighand lock, we have
1432 * to manually enable the seccomp thread flag here.
1433 */
1434 if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1435 set_tsk_thread_flag(p, TIF_SECCOMP);
1436#endif
1437}
1438
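/*
 * Hedged sketch of what is being propagated: a parent that does
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * is guaranteed, via the copy above, that every child it forks starts
 * with the same filter and no_new_privs bit already applied.
 */
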
Heiko Carstens17da2bd2009-01-14 14:14:10 +01001439SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440{
1441 current->clear_child_tid = tidptr;
1442
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001443 return task_pid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444}
1445
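/*
 * Hedged usage note: threading runtimes pair this with
 * CLONE_CHILD_CLEARTID so that thread exit clears *tidptr and wakes
 * FUTEX_WAIT-ers, which is the usual basis for pthread_join():
 *
 *	syscall(SYS_set_tid_address, &tid);	// from user space
 */
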
Alexey Dobriyana39bc512007-10-18 23:41:10 -07001446static void rt_mutex_init_task(struct task_struct *p)
Ingo Molnar23f78d4a2006-06-27 02:54:53 -07001447{
Thomas Gleixner1d615482009-11-17 14:54:03 +01001448 raw_spin_lock_init(&p->pi_lock);
Zilvinas Valinskase29e1752007-03-16 13:38:34 -08001449#ifdef CONFIG_RT_MUTEXES
Peter Zijlstrafb00aca2013-11-07 14:43:43 +01001450 p->pi_waiters = RB_ROOT;
1451 p->pi_waiters_leftmost = NULL;
Ingo Molnar23f78d4a2006-06-27 02:54:53 -07001452 p->pi_blocked_on = NULL;
Ingo Molnar23f78d4a2006-06-27 02:54:53 -07001453#endif
1454}
1455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456/*
Frank Mayharf06febc2008-09-12 09:54:39 -07001457 * Initialize POSIX timer handling for a single task.
1458 */
1459static void posix_cpu_timers_init(struct task_struct *tsk)
1460{
Martin Schwidefsky64861632011-12-15 14:56:09 +01001461 tsk->cputime_expires.prof_exp = 0;
1462 tsk->cputime_expires.virt_exp = 0;
Frank Mayharf06febc2008-09-12 09:54:39 -07001463 tsk->cputime_expires.sched_exp = 0;
1464 INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1465 INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1466 INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1467}
1468
Oleg Nesterov81907732013-07-03 15:08:31 -07001469static inline void
1470init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1471{
1472 task->pids[type].pid = pid;
1473}
1474
Christian Brauner20c96e02019-03-27 13:04:15 +01001475static int pidfd_release(struct inode *inode, struct file *file)
1476{
1477 struct pid *pid = file->private_data;
1478
1479 file->private_data = NULL;
1480 put_pid(pid);
1481 return 0;
1482}
1483
1484#ifdef CONFIG_PROC_FS
1485static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
1486{
1487 struct pid_namespace *ns = file_inode(m->file)->i_sb->s_fs_info;
1488 struct pid *pid = f->private_data;
1489
1490 seq_put_decimal_ull(m, "Pid:\t", pid_nr_ns(pid, ns));
1491 seq_putc(m, '\n');
1492}
1493#endif
1494
Joel Fernandes (Google)911e99a2019-04-30 12:21:53 -04001495/*
1496 * Poll support for process exit notification.
1497 */
1498static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
1499{
1500 struct task_struct *task;
1501 struct pid *pid = file->private_data;
1502 int poll_flags = 0;
1503
1504 poll_wait(file, &pid->wait_pidfd, pts);
1505
1506 rcu_read_lock();
1507 task = pid_task(pid, PIDTYPE_PID);
1508 /*
1509 * Inform pollers only when the whole thread group exits.
1510 * If the thread group leader exits before all other threads in the
1511 * group, then poll(2) should block, similar to the wait(2) family.
1512 */
1513 if (!task || (task->exit_state && thread_group_empty(task)))
1514 poll_flags = POLLIN | POLLRDNORM;
1515 rcu_read_unlock();
1516
1517 return poll_flags;
1518}
1519
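/*
 * Hedged user-space sketch: with the above, exit notification can be
 * consumed via poll(2)/epoll(7) instead of SIGCHLD:
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns once the thread group has exited
 */
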
Christian Brauner20c96e02019-03-27 13:04:15 +01001520const struct file_operations pidfd_fops = {
1521 .release = pidfd_release,
Joel Fernandes (Google)911e99a2019-04-30 12:21:53 -04001522 .poll = pidfd_poll,
Christian Brauner20c96e02019-03-27 13:04:15 +01001523#ifdef CONFIG_PROC_FS
1524 .show_fdinfo = pidfd_show_fdinfo,
1525#endif
1526};
1527
1528/**
1529 * pidfd_create() - Create a new pid file descriptor.
1530 *
1531 * @pid: struct pid that the pidfd will reference
1532 *
1533 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
1534 *
1535 * Note that this function can only be called after the fd table has
1536 * been unshared to avoid leaking the pidfd to the new process.
1537 *
1538 * Return: On success, a cloexec pidfd is returned.
1539 * On error, a negative errno number will be returned.
1540 */
1541static int pidfd_create(struct pid *pid)
1542{
1543 int fd;
1544
1545 fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
1546 O_RDWR | O_CLOEXEC);
1547 if (fd < 0)
1548 put_pid(pid);
1549
1550 return fd;
1551}
1552
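/*
 * Hedged sketch: a caller receives the pidfd through the parent_tidptr
 * argument of clone(2) (raw syscall shown; argument order is
 * architecture-dependent, x86-64 here):
 *
 *	int pidfd = -1;
 *	pid_t pid = syscall(SYS_clone, CLONE_PIDFD | SIGCHLD, NULL,
 *			    &pidfd, NULL, 0);
 */
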
Frank Mayharf06febc2008-09-12 09:54:39 -07001553/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 * This creates a new process as a copy of the old one,
1555 * but does not actually start it yet.
1556 *
1557 * It copies the registers, and all the appropriate
1558 * parts of the process environment (as per the clone
1559 * flags). The actual kick-off is left to the caller.
1560 */
Emese Revfy0766f782016-06-20 20:42:34 +02001561static __latent_entropy struct task_struct *copy_process(
1562 unsigned long clone_flags,
Ingo Molnar36c8b582006-07-03 00:25:41 -07001563 unsigned long stack_start,
Ingo Molnar36c8b582006-07-03 00:25:41 -07001564 unsigned long stack_size,
Christian Brauner20c96e02019-03-27 13:04:15 +01001565 int __user *parent_tidptr,
Ingo Molnar36c8b582006-07-03 00:25:41 -07001566 int __user *child_tidptr,
Roland McGrath09a05392008-07-25 19:45:47 -07001567 struct pid *pid,
Josh Triplett3033f14a2015-06-25 15:01:19 -07001568 int trace,
Andi Kleen725fc622016-05-23 16:24:05 -07001569 unsigned long tls,
1570 int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
Christian Brauner20c96e02019-03-27 13:04:15 +01001572 int pidfd = -1, retval;
Mariusz Kozlowskia24efe62007-10-18 23:41:09 -07001573 struct task_struct *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
1575 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1576 return ERR_PTR(-EINVAL);
1577
Eric W. Biedermane66eded2013-03-13 11:51:49 -07001578 if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1579 return ERR_PTR(-EINVAL);
1580
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 /*
1582 * Thread groups must share signals as well, and detached threads
1583 * can only be started up within the thread group.
1584 */
1585 if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1586 return ERR_PTR(-EINVAL);
1587
1588 /*
1589 * Shared signal handlers imply shared VM. By way of the above,
1590 * thread groups also imply shared VM. Blocking this case allows
1591 * for various simplifications in other code.
1592 */
1593 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1594 return ERR_PTR(-EINVAL);
1595
Sukadev Bhattiprolu123be072009-09-23 15:57:20 -07001596 /*
1597 * Siblings of global init remain as zombies on exit since they are
1598 * not reaped by their parent (swapper). To solve this and to avoid
1599 * multi-rooted process trees, prevent global and container-inits
1600 * from creating siblings.
1601 */
1602 if ((clone_flags & CLONE_PARENT) &&
1603 current->signal->flags & SIGNAL_UNKILLABLE)
1604 return ERR_PTR(-EINVAL);
1605
Eric W. Biederman8382fca2012-12-20 19:26:06 -08001606 /*
Oleg Nesterov40a0d322013-09-11 14:19:41 -07001607	 * If the new process will be in a different pid or user namespace,
Eric W. Biedermanfaf00da2015-08-10 18:25:44 -05001608 * do not allow it to share a thread group with the forking task.
Eric W. Biederman8382fca2012-12-20 19:26:06 -08001609 */
Eric W. Biedermanfaf00da2015-08-10 18:25:44 -05001610 if (clone_flags & CLONE_THREAD) {
Oleg Nesterov40a0d322013-09-11 14:19:41 -07001611 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1612 (task_active_pid_ns(current) !=
1613 current->nsproxy->pid_ns_for_children))
1614 return ERR_PTR(-EINVAL);
1615 }
Eric W. Biederman8382fca2012-12-20 19:26:06 -08001616
Christian Brauner20c96e02019-03-27 13:04:15 +01001617 if (clone_flags & CLONE_PIDFD) {
1618 int reserved;
1619
1620 /*
1621 * - CLONE_PARENT_SETTID is useless for pidfds and also
1622 * parent_tidptr is used to return pidfds.
1623 * - CLONE_DETACHED is blocked so that we can potentially
1624 * reuse it later for CLONE_PIDFD.
1625 * - CLONE_THREAD is blocked until someone really needs it.
1626 */
1627 if (clone_flags &
1628 (CLONE_DETACHED | CLONE_PARENT_SETTID | CLONE_THREAD))
1629 return ERR_PTR(-EINVAL);
1630
1631 /*
1632 * Verify that parent_tidptr is sane so we can potentially
1633 * reuse it later.
1634 */
1635 if (get_user(reserved, parent_tidptr))
1636 return ERR_PTR(-EFAULT);
1637
1638 if (reserved != 0)
1639 return ERR_PTR(-EINVAL);
1640 }
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 retval = security_task_create(clone_flags);
1643 if (retval)
1644 goto fork_out;
1645
1646 retval = -ENOMEM;
Andi Kleen725fc622016-05-23 16:24:05 -07001647 p = dup_task_struct(current, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 if (!p)
1649 goto fork_out;
1650
Vegard Nossum5218d132017-05-09 09:39:59 +02001651 /*
1652 * This _must_ happen before we call free_task(), i.e. before we jump
1653 * to any of the bad_fork_* labels. This is to avoid freeing
1654 * p->set_child_tid which is (ab)used as a kthread's data pointer for
1655 * kernel threads (PF_KTHREAD).
1656 */
1657 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1658 /*
1659 * Clear TID on mm_release()?
1660 */
1661 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1662
Sultan Alsawaf47bbcd62018-06-03 10:47:51 -07001663 cpufreq_task_times_init(p);
1664
Steven Rostedtf7e8b612009-06-02 16:39:48 -04001677 ftrace_graph_init_task(p);
1678
Peter Zijlstrabea493a2006-10-17 00:10:33 -07001679 rt_mutex_init_task(p);
1680
Ingo Molnard12c1a32008-07-14 12:09:28 +02001681#ifdef CONFIG_PROVE_LOCKING
Ingo Molnarde30a2b2006-07-03 00:24:42 -07001682 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1683 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1684#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 retval = -EAGAIN;
David Howells3b11a1d2008-11-14 10:39:26 +11001686 if (atomic_read(&p->real_cred->user->processes) >=
Jiri Slaby78d7d402010-03-05 13:42:54 -08001687 task_rlimit(p, RLIMIT_NPROC)) {
Eric Parisb57922b2013-07-03 15:08:29 -07001688 if (p->real_cred->user != INIT_USER &&
1689 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 goto bad_fork_free;
1691 }
Vasiliy Kulikov72fa5992011-08-08 19:02:04 +04001692 current->flags &= ~PF_NPROC_EXCEEDED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
David Howellsf1752ee2008-11-14 10:39:17 +11001694 retval = copy_creds(p, clone_flags);
1695 if (retval < 0)
1696 goto bad_fork_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
1698 /*
1699 * If multiple threads are within copy_process(), then this check
1700 * triggers too late. This doesn't hurt, the check is only there
1701 * to stop root fork bombs.
1702 */
Li Zefan04ec93f2009-02-06 08:17:19 +00001703 retval = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 if (nr_threads >= max_threads)
1705 goto bad_fork_cleanup_count;
1706
Shailabh Nagarca74e922006-07-14 00:24:36 -07001707 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
David Rientjes514ddb42014-04-07 15:37:27 -07001708 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1709 p->flags |= PF_FORKNOEXEC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 INIT_LIST_HEAD(&p->children);
1711 INIT_LIST_HEAD(&p->sibling);
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001712 rcu_copy_process(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 p->vfork_done = NULL;
1714 spin_lock_init(&p->alloc_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 init_sigpending(&p->pending);
1717
Martin Schwidefsky64861632011-12-15 14:56:09 +01001718 p->utime = p->stime = p->gtime = 0;
1719 p->utimescaled = p->stimescaled = 0;
Peter Zijlstra9d7fb042015-06-30 11:30:54 +02001720 prev_cputime_init(&p->prev_cputime);
1721
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001722#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
Frederic Weisbeckerb7ce2272015-11-19 16:47:34 +01001723 seqcount_init(&p->vtime_seqcount);
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001724 p->vtime_snap = 0;
Frederic Weisbecker7098c1e2015-11-19 16:47:30 +01001725 p->vtime_snap_whence = VTIME_INACTIVE;
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001726#endif
1727
KAMEZAWA Hiroyukia3a2e762010-04-06 14:34:42 -07001728#if defined(SPLIT_RSS_COUNTING)
1729 memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1730#endif
Balbir Singh172ba842007-07-09 18:52:00 +02001731
Arjan van de Ven69766752008-09-01 15:52:40 -07001732 p->default_timer_slack_ns = current->timer_slack_ns;
1733
Johannes Weiner3df0e592018-10-26 15:06:27 -07001734#ifdef CONFIG_PSI
1735 p->psi_flags = 0;
1736#endif
1737
Andrea Righi59954772008-07-27 17:29:15 +02001738 task_io_accounting_init(&p->ioac);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 acct_clear_integrals(p);
1740
Frank Mayharf06febc2008-09-12 09:54:39 -07001741 posix_cpu_timers_init(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 p->io_context = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 p->audit_context = NULL;
Paul Menageb4f48b62007-10-18 23:39:33 -07001745 cgroup_fork(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746#ifdef CONFIG_NUMA
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07001747 p->mempolicy = mpol_dup(p->mempolicy);
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001748 if (IS_ERR(p->mempolicy)) {
1749 retval = PTR_ERR(p->mempolicy);
1750 p->mempolicy = NULL;
Li Zefane8604cb2014-03-28 15:18:27 +08001751 goto bad_fork_cleanup_threadgroup_lock;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753#endif
Michal Hocko778d3b02011-07-26 16:08:30 -07001754#ifdef CONFIG_CPUSETS
1755 p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1756 p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
Mel Gormancc9a6c82012-03-21 16:34:11 -07001757 seqcount_init(&p->mems_allowed_seq);
Michal Hocko778d3b02011-07-26 16:08:30 -07001758#endif
Ingo Molnarde30a2b2006-07-03 00:24:42 -07001759#ifdef CONFIG_TRACE_IRQFLAGS
1760 p->irq_events = 0;
1761 p->hardirqs_enabled = 0;
1762 p->hardirq_enable_ip = 0;
1763 p->hardirq_enable_event = 0;
1764 p->hardirq_disable_ip = _THIS_IP_;
1765 p->hardirq_disable_event = 0;
1766 p->softirqs_enabled = 1;
1767 p->softirq_enable_ip = _THIS_IP_;
1768 p->softirq_enable_event = 0;
1769 p->softirq_disable_ip = 0;
1770 p->softirq_disable_event = 0;
1771 p->hardirq_context = 0;
1772 p->softirq_context = 0;
1773#endif
David Hildenbrand8bcbde52015-05-11 17:52:06 +02001774
1775 p->pagefault_disabled = 0;
1776
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001777#ifdef CONFIG_LOCKDEP
1778 p->lockdep_depth = 0; /* no locks held yet */
1779 p->curr_chain_key = 0;
1780 p->lockdep_recursion = 0;
1781#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
Ingo Molnar408894e2006-01-09 15:59:20 -08001783#ifdef CONFIG_DEBUG_MUTEXES
1784 p->blocked_on = NULL; /* not blocked yet */
1785#endif
Kent Overstreetcafe5632013-03-23 16:11:31 -07001786#ifdef CONFIG_BCACHE
1787 p->sequential_io = 0;
1788 p->sequential_io_avg = 0;
1789#endif
Markus Metzger0f481402009-04-03 16:43:48 +02001790
Srivatsa Vaddagiri3c90e6e2007-11-09 22:39:39 +01001791 /* Perform scheduler related setup. Assign this task to a CPU. */
Dario Faggioliaab03e02013-11-28 11:14:43 +01001792 retval = sched_fork(clone_flags, p);
1793 if (retval)
1794 goto bad_fork_cleanup_policy;
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02001795
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001796 retval = perf_event_init_task(p);
Peter Zijlstra6ab423e2009-05-25 14:45:27 +02001797 if (retval)
1798 goto bad_fork_cleanup_policy;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001799 retval = audit_alloc(p);
1800 if (retval)
Peter Zijlstra6c72e3502014-10-02 16:17:02 -07001801 goto bad_fork_cleanup_perf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 /* copy all the process information */
Jack Millerab602f72014-08-08 14:23:19 -07001803 shm_init_task(p);
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001804 retval = copy_semundo(clone_flags, p);
1805 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 goto bad_fork_cleanup_audit;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001807 retval = copy_files(clone_flags, p);
1808 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 goto bad_fork_cleanup_semundo;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001810 retval = copy_fs(clone_flags, p);
1811 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 goto bad_fork_cleanup_files;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001813 retval = copy_sighand(clone_flags, p);
1814 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 goto bad_fork_cleanup_fs;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001816 retval = copy_signal(clone_flags, p);
1817 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 goto bad_fork_cleanup_sighand;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001819 retval = copy_mm(clone_flags, p);
1820 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 goto bad_fork_cleanup_signal;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001822 retval = copy_namespaces(clone_flags, p);
1823 if (retval)
David Howellsd84f4f92008-11-14 10:39:23 +11001824 goto bad_fork_cleanup_mm;
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001825 retval = copy_io(clone_flags, p);
1826 if (retval)
Jens Axboefd0928d2008-01-24 08:52:45 +01001827 goto bad_fork_cleanup_namespaces;
Josh Triplett3033f14a2015-06-25 15:01:19 -07001828 retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 if (retval)
Jens Axboefd0928d2008-01-24 08:52:45 +01001830 goto bad_fork_cleanup_io;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
Pavel Emelyanov425fb2b2007-10-18 23:40:07 -07001832 if (pid != &init_struct_pid) {
Andy Lutomirskic2b1df22013-08-22 11:39:16 -07001833 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
Michal Hocko35f71bc2015-04-16 12:47:38 -07001834 if (IS_ERR(pid)) {
1835 retval = PTR_ERR(pid);
Jiri Slaby0740aa52016-05-20 17:00:25 -07001836 goto bad_fork_cleanup_thread;
Michal Hocko35f71bc2015-04-16 12:47:38 -07001837 }
Pavel Emelyanov425fb2b2007-10-18 23:40:07 -07001838 }
1839
Christian Brauner20c96e02019-03-27 13:04:15 +01001840 /*
1841 * This has to happen after we've potentially unshared the file
1842 * descriptor table (so that the pidfd doesn't leak into the child
1843 * if the fd table isn't shared).
1844 */
1845 if (clone_flags & CLONE_PIDFD) {
1846 retval = pidfd_create(pid);
1847 if (retval < 0)
1848 goto bad_fork_free_pid;
1849
1850 pidfd = retval;
1851 retval = put_user(pidfd, parent_tidptr);
1852 if (retval)
1853 goto bad_fork_put_pidfd;
1854 }
1855
Jens Axboe73c10102011-03-08 13:19:51 +01001856#ifdef CONFIG_BLOCK
1857 p->plug = NULL;
1858#endif
Alexey Dobriyan42b2dd02007-10-16 23:27:30 -07001859#ifdef CONFIG_FUTEX
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08001860 p->robust_list = NULL;
1861#ifdef CONFIG_COMPAT
1862 p->compat_robust_list = NULL;
1863#endif
Ingo Molnarc87e2832006-06-27 02:54:58 -07001864 INIT_LIST_HEAD(&p->pi_state_list);
1865 p->pi_state_cache = NULL;
Alexey Dobriyan42b2dd02007-10-16 23:27:30 -07001866#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 /*
GOTO Masanorif9a38792006-03-13 21:20:44 -08001868 * sigaltstack should be cleared when sharing the same VM
1869 */
1870 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
Stas Sergeev2a742132016-04-14 23:20:04 +03001871 sas_ss_reset(p);
GOTO Masanorif9a38792006-03-13 21:20:44 -08001872
1873 /*
Oleg Nesterov65808072009-12-15 16:47:16 -08001874 * Syscall tracing and stepping should be turned off in the
1875 * child regardless of CLONE_PTRACE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 */
Oleg Nesterov65808072009-12-15 16:47:16 -08001877 user_disable_single_step(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
Laurent Viviered75e8d2005-09-03 15:57:18 -07001879#ifdef TIF_SYSCALL_EMU
1880 clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1881#endif
Arjan van de Ven97455122008-01-25 21:08:34 +01001882 clear_all_latency_tracing(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 /* ok, now we should be set up.. */
Oleg Nesterov18c830d2013-07-03 15:08:32 -07001885 p->pid = pid_nr(pid);
1886 if (clone_flags & CLONE_THREAD) {
Oleg Nesterov5f8aadd2012-03-14 19:55:38 +01001887 p->exit_signal = -1;
Oleg Nesterov18c830d2013-07-03 15:08:32 -07001888 p->group_leader = current->group_leader;
1889 p->tgid = current->tgid;
1890 } else {
1891 if (clone_flags & CLONE_PARENT)
1892 p->exit_signal = current->group_leader->exit_signal;
1893 else
1894 p->exit_signal = (clone_flags & CSIGNAL);
1895 p->group_leader = p;
1896 p->tgid = p->pid;
1897 }
Oleg Nesterov5f8aadd2012-03-14 19:55:38 +01001898
Wu Fengguang9d823e82011-06-11 18:10:12 -06001899 p->nr_dirtied = 0;
1900 p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
Wu Fengguang83712352011-06-11 19:25:42 -06001901 p->dirty_paused_when = 0;
Wu Fengguang9d823e82011-06-11 18:10:12 -06001902
Oleg Nesterovbb8cbbf2013-11-13 15:36:12 +01001903 p->pdeath_signal = 0;
Oleg Nesterov47e65322006-03-28 16:11:25 -08001904 INIT_LIST_HEAD(&p->thread_group);
Al Viro158e1642012-06-27 09:24:13 +04001905 p->task_works = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
Balbir Singh568ac882016-08-10 15:43:06 -04001907 threadgroup_change_begin(current);
Oleg Nesterov18c830d2013-07-03 15:08:32 -07001908 /*
Aleksa Sarai7e476822015-06-09 21:32:09 +10001909 * Ensure that the cgroup subsystem policies allow the new process to be
1910	 * forked. It should be noted that the new process's css_set can be changed
1911 * between here and cgroup_post_fork() if an organisation operation is in
1912 * progress.
1913 */
Oleg Nesterovb53202e2015-12-03 10:24:08 -05001914 retval = cgroup_can_fork(p);
Aleksa Sarai7e476822015-06-09 21:32:09 +10001915 if (retval)
Christian Brauner5ae2d0f2019-05-10 11:53:46 +02001916 goto bad_fork_cgroup_threadgroup_change_end;
Aleksa Sarai7e476822015-06-09 21:32:09 +10001917
1918 /*
David Herrmann0ea60302019-01-08 13:58:52 +01001919 * From this point on we must avoid any synchronous user-space
1920 * communication until we take the tasklist-lock. In particular, we do
1921 * not want user-space to be able to predict the process start-time by
1922 * stalling fork(2) after we recorded the start_time but before it is
1923 * visible to the system.
1924 */
1925
1926 p->start_time = ktime_get_ns();
1927 p->real_start_time = ktime_get_boot_ns();
1928
1929 /*
Oleg Nesterov18c830d2013-07-03 15:08:32 -07001930	 * Make it visible to the rest of the system, but don't wake it up yet.
1931 * Need tasklist lock for parent etc handling!
1932 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 write_lock_irq(&tasklist_lock);
1934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 /* CLONE_PARENT re-uses the old parent */
Oleg Nesterov2d5516c2009-03-02 22:58:45 +01001936 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 p->real_parent = current->real_parent;
Oleg Nesterov2d5516c2009-03-02 22:58:45 +01001938 p->parent_exec_id = current->parent_exec_id;
1939 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 p->real_parent = current;
Oleg Nesterov2d5516c2009-03-02 22:58:45 +01001941 p->parent_exec_id = current->self_exec_id;
1942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
Oleg Nesterov3f17da62006-02-15 22:13:24 +03001944 spin_lock(&current->sighand->siglock);
Oleg Nesterov4a2c7a72006-03-28 16:11:26 -08001945
1946 /*
Kees Cookdbd952122014-06-27 15:18:48 -07001947 * Copy seccomp details explicitly here, in case they were changed
1948 * before holding sighand lock.
1949 */
1950 copy_seccomp(p);
1951
1952 /*
Oleg Nesterov4a2c7a72006-03-28 16:11:26 -08001953 * Process group and session signals need to be delivered to just the
1954 * parent before the fork or both the parent and the child after the
1955 * fork. Restart if a signal comes in before we add the new process to
1956	 * its process group.
1957 * A fatal signal pending means that current will exit, so the new
1958 * thread can't slip out of an OOM kill (or normal SIGKILL).
Daniel Rebelo de Oliveirafb0a6852011-07-26 16:08:39 -07001959 */
Daniel Walker23ff4442007-10-18 03:06:07 -07001960 recalc_sigpending();
Oleg Nesterov4a2c7a72006-03-28 16:11:26 -08001961 if (signal_pending(current)) {
Oleg Nesterov4a2c7a72006-03-28 16:11:26 -08001962 retval = -ERESTARTNOINTR;
Aleksa Sarai7e476822015-06-09 21:32:09 +10001963 goto bad_fork_cancel_cgroup;
Oleg Nesterov4a2c7a72006-03-28 16:11:26 -08001964 }
Kirill Tkhai2ea2f892017-05-12 19:11:31 +03001965 if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
1966 retval = -ENOMEM;
1967 goto bad_fork_cancel_cgroup;
1968 }
Oleg Nesterov4a2c7a72006-03-28 16:11:26 -08001969
Oleg Nesterov73b9ebf2006-03-28 16:11:07 -08001970 if (likely(p->pid)) {
Tejun Heo4b9d33e2011-06-17 16:50:38 +02001971 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
Oleg Nesterov81907732013-07-03 15:08:31 -07001973 init_task_pid(p, PIDTYPE_PID, pid);
Oleg Nesterov73b9ebf2006-03-28 16:11:07 -08001974 if (thread_group_leader(p)) {
Oleg Nesterov81907732013-07-03 15:08:31 -07001975 init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
1976 init_task_pid(p, PIDTYPE_SID, task_session(current));
1977
Eric W. Biederman1c4042c2010-07-12 17:10:36 -07001978 if (is_child_reaper(pid)) {
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08001979 ns_of_pid(pid)->child_reaper = p;
Eric W. Biederman1c4042c2010-07-12 17:10:36 -07001980 p->signal->flags |= SIGNAL_UNKILLABLE;
1981 }
Oleg Nesterovc97d9892006-03-28 16:11:06 -08001982
Oleg Nesterovfea9d172008-02-08 04:19:19 -08001983 p->signal->leader_pid = pid;
Alan Cox9c9f4de2008-10-13 10:37:26 +01001984 p->signal->tty = tty_kref_get(current->signal->tty);
Oleg Nesterov9cd80bb2009-12-17 15:27:15 -08001985 list_add_tail(&p->sibling, &p->real_parent->children);
Eric W. Biederman5e85d4a2006-04-18 22:20:16 -07001986 list_add_tail_rcu(&p->tasks, &init_task.tasks);
Oleg Nesterov81907732013-07-03 15:08:31 -07001987 attach_pid(p, PIDTYPE_PGID);
1988 attach_pid(p, PIDTYPE_SID);
Christoph Lameter909ea962010-12-08 16:22:55 +01001989 __this_cpu_inc(process_counts);
Oleg Nesterov80628ca2013-07-03 15:08:30 -07001990 } else {
1991 current->signal->nr_threads++;
1992 atomic_inc(&current->signal->live);
1993 atomic_inc(&current->signal->sigcnt);
Oleg Nesterov80628ca2013-07-03 15:08:30 -07001994 list_add_tail_rcu(&p->thread_group,
1995 &p->group_leader->thread_group);
Oleg Nesterov0c740d02014-01-21 15:49:56 -08001996 list_add_tail_rcu(&p->thread_node,
1997 &p->signal->thread_head);
Oleg Nesterov73b9ebf2006-03-28 16:11:07 -08001998 }
Oleg Nesterov81907732013-07-03 15:08:31 -07001999 attach_pid(p, PIDTYPE_PID);
Oleg Nesterov73b9ebf2006-03-28 16:11:07 -08002000 nr_threads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 }
2002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 total_forks++;
Oleg Nesterov3f17da62006-02-15 22:13:24 +03002004 spin_unlock(&current->sighand->siglock);
Oleg Nesterov4af42062014-04-13 20:58:54 +02002005 syscall_tracepoint_update(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 write_unlock_irq(&tasklist_lock);
Oleg Nesterov4af42062014-04-13 20:58:54 +02002007
Andrew Mortonc13cf852005-11-28 13:43:48 -08002008 proc_fork_connector(p);
Oleg Nesterovb53202e2015-12-03 10:24:08 -05002009 cgroup_post_fork(p);
Oleg Nesterovc9e75f02015-11-27 19:57:19 +01002010 threadgroup_change_end(current);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002011 perf_event_fork(p);
KAMEZAWA Hiroyuki43d2b112012-01-10 15:08:09 -08002012
2013 trace_task_newtask(p, clone_flags);
Oleg Nesterov3ab67962013-10-16 19:39:37 +02002014 uprobe_copy_process(p, clone_flags);
KAMEZAWA Hiroyuki43d2b112012-01-10 15:08:09 -08002015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 return p;
2017
Aleksa Sarai7e476822015-06-09 21:32:09 +10002018bad_fork_cancel_cgroup:
Kirill Tkhai2ea2f892017-05-12 19:11:31 +03002019 spin_unlock(&current->sighand->siglock);
2020 write_unlock_irq(&tasklist_lock);
Oleg Nesterovb53202e2015-12-03 10:24:08 -05002021 cgroup_cancel_fork(p);
Christian Brauner5ae2d0f2019-05-10 11:53:46 +02002022bad_fork_cgroup_threadgroup_change_end:
Balbir Singh568ac882016-08-10 15:43:06 -04002023 threadgroup_change_end(current);
Christian Brauner20c96e02019-03-27 13:04:15 +01002024bad_fork_put_pidfd:
2025 if (clone_flags & CLONE_PIDFD)
2026 sys_close(pidfd);
Aleksa Sarai7e476822015-06-09 21:32:09 +10002027bad_fork_free_pid:
Pavel Emelyanov425fb2b2007-10-18 23:40:07 -07002028 if (pid != &init_struct_pid)
2029 free_pid(pid);
Jiri Slaby0740aa52016-05-20 17:00:25 -07002030bad_fork_cleanup_thread:
2031 exit_thread(p);
Jens Axboefd0928d2008-01-24 08:52:45 +01002032bad_fork_cleanup_io:
Louis Rillingb69f2292009-12-04 14:52:42 +01002033 if (p->io_context)
2034 exit_io_context(p);
Serge E. Hallynab516012006-10-02 02:18:06 -07002035bad_fork_cleanup_namespaces:
Linus Torvalds444f3782007-01-30 13:35:18 -08002036 exit_task_namespaces(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037bad_fork_cleanup_mm:
David Rientjesc9f01242011-10-31 17:07:15 -07002038 if (p->mm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 mmput(p->mm);
2040bad_fork_cleanup_signal:
Oleg Nesterov4ab6c082009-08-26 14:29:24 -07002041 if (!(clone_flags & CLONE_THREAD))
Mike Galbraith1c5354d2011-01-05 11:16:04 +01002042 free_signal_struct(p->signal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043bad_fork_cleanup_sighand:
Oleg Nesterova7e53282006-03-28 16:11:27 -08002044 __cleanup_sighand(p->sighand);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045bad_fork_cleanup_fs:
2046 exit_fs(p); /* blocking */
2047bad_fork_cleanup_files:
2048 exit_files(p); /* blocking */
2049bad_fork_cleanup_semundo:
2050 exit_sem(p);
2051bad_fork_cleanup_audit:
2052 audit_free(p);
Peter Zijlstra6c72e3502014-10-02 16:17:02 -07002053bad_fork_cleanup_perf:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002054 perf_event_free_task(p);
Peter Zijlstra6c72e3502014-10-02 16:17:02 -07002055bad_fork_cleanup_policy:
Syed Rameez Mustafa88040bd2016-11-03 18:13:08 -07002056 free_task_load_ptrs(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057#ifdef CONFIG_NUMA
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07002058 mpol_put(p->mempolicy);
Li Zefane8604cb2014-03-28 15:18:27 +08002059bad_fork_cleanup_threadgroup_lock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060#endif
Shailabh Nagar35df17c2006-08-31 21:27:38 -07002061 delayacct_tsk_free(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062bad_fork_cleanup_count:
David Howellsd84f4f92008-11-14 10:39:23 +11002063 atomic_dec(&p->cred->user->processes);
David Howellse0e81732009-09-02 09:13:40 +01002064 exit_creds(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065bad_fork_free:
Andy Lutomirski405c0752016-10-31 08:11:43 -07002066 p->state = TASK_DEAD;
Andy Lutomirski68f24b082016-09-15 22:45:48 -07002067 put_task_stack(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 free_task(p);
Oleg Nesterovfe7d37d2006-01-08 01:04:02 -08002069fork_out:
2070 return ERR_PTR(retval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
Oleg Nesterovf106eee2010-05-26 14:44:11 -07002073static inline void init_idle_pids(struct pid_link *links)
2074{
2075 enum pid_type type;
2076
2077 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2078 INIT_HLIST_NODE(&links[type].node); /* not really needed */
2079 links[type].pid = &init_struct_pid;
2080 }
2081}
2082
Paul Gortmaker0db06282013-06-19 14:53:51 -04002083struct task_struct *fork_idle(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084{
Ingo Molnar36c8b582006-07-03 00:25:41 -07002085 struct task_struct *task;
Christian Brauner20c96e02019-03-27 13:04:15 +01002086 task = copy_process(CLONE_VM, 0, 0, NULL, NULL, &init_struct_pid, 0, 0,
Andi Kleen725fc622016-05-23 16:24:05 -07002087 cpu_to_node(cpu));
Oleg Nesterovf106eee2010-05-26 14:44:11 -07002088 if (!IS_ERR(task)) {
2089 init_idle_pids(task->pids);
Pavankumar Kondeti736630c2018-09-20 15:31:36 +05302090 init_idle(task, cpu);
Oleg Nesterovf106eee2010-05-26 14:44:11 -07002091 }
Oleg Nesterov73b9ebf2006-03-28 16:11:07 -08002092
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 return task;
2094}
2095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096/*
2097 * Ok, this is the main fork-routine.
2098 *
2099 * It copies the process, and if successful kick-starts
2100 * it and waits for it to finish using the VM if required.
2101 */
Josh Triplett3033f14a2015-06-25 15:01:19 -07002102long _do_fork(unsigned long clone_flags,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 unsigned long stack_start,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 unsigned long stack_size,
2105 int __user *parent_tidptr,
Josh Triplett3033f14a2015-06-25 15:01:19 -07002106 int __user *child_tidptr,
2107 unsigned long tls)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
2109 struct task_struct *p;
2110 int trace = 0;
Eric W. Biederman92476d72006-03-31 02:31:42 -08002111 long nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
Andrew Mortonbdff7462008-02-04 22:27:22 -08002113 /*
Tejun Heo4b9d33e2011-06-17 16:50:38 +02002114 * Determine whether and which event to report to ptracer. When
2115 * called from kernel_thread or CLONE_UNTRACED is explicitly
2116 * requested, no event is reported; otherwise, report if the event
2117 * for the type of forking is enabled.
Roland McGrath09a05392008-07-25 19:45:47 -07002118 */
Al Viroe80d6662012-10-22 23:10:08 -04002119 if (!(clone_flags & CLONE_UNTRACED)) {
Tejun Heo4b9d33e2011-06-17 16:50:38 +02002120 if (clone_flags & CLONE_VFORK)
2121 trace = PTRACE_EVENT_VFORK;
2122 else if ((clone_flags & CSIGNAL) != SIGCHLD)
2123 trace = PTRACE_EVENT_CLONE;
2124 else
2125 trace = PTRACE_EVENT_FORK;
2126
2127 if (likely(!ptrace_event_enabled(current, trace)))
2128 trace = 0;
2129 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Christian Brauner20c96e02019-03-27 13:04:15 +01002131 p = copy_process(clone_flags, stack_start, stack_size, parent_tidptr,
Andi Kleen725fc622016-05-23 16:24:05 -07002132 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
Emese Revfy38addce2016-06-20 20:41:19 +02002133 add_latent_entropy();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 /*
2135	 * Do this prior to waking up the new thread - the thread pointer
2136	 * might become invalid after that point, if the thread exits quickly.
2137 */
2138 if (!IS_ERR(p)) {
2139 struct completion vfork;
Matthew Dempsky4e523652014-06-06 14:36:42 -07002140 struct pid *pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
Sultan Alsawaf47bbcd62018-06-03 10:47:51 -07002142 cpufreq_task_times_alloc(p);
2143
Mathieu Desnoyers0a16b602008-07-18 12:16:17 -04002144 trace_sched_process_fork(current, p);
2145
Matthew Dempsky4e523652014-06-06 14:36:42 -07002146 pid = get_task_pid(p, PIDTYPE_PID);
2147 nr = pid_vnr(pid);
Pavel Emelyanov30e49c22007-10-18 23:40:10 -07002148
2149 if (clone_flags & CLONE_PARENT_SETTID)
2150 put_user(nr, parent_tidptr);
Sukadev Bhattiprolua6f5e062007-10-18 23:39:53 -07002151
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 if (clone_flags & CLONE_VFORK) {
2153 p->vfork_done = &vfork;
2154 init_completion(&vfork);
Oleg Nesterovd68b46f2012-03-05 14:59:13 -08002155 get_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 }
2157
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02002158 wake_up_new_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
Tejun Heo4b9d33e2011-06-17 16:50:38 +02002160 /* forking complete and child started to run, tell ptracer */
2161 if (unlikely(trace))
Matthew Dempsky4e523652014-06-06 14:36:42 -07002162 ptrace_event_pid(trace, pid);
Roland McGrath09a05392008-07-25 19:45:47 -07002163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 if (clone_flags & CLONE_VFORK) {
Oleg Nesterovd68b46f2012-03-05 14:59:13 -08002165 if (!wait_for_vfork_done(p, &vfork))
Matthew Dempsky4e523652014-06-06 14:36:42 -07002166 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 }
Matthew Dempsky4e523652014-06-06 14:36:42 -07002168
2169 put_pid(pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 } else {
Eric W. Biederman92476d72006-03-31 02:31:42 -08002171 nr = PTR_ERR(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 }
Eric W. Biederman92476d72006-03-31 02:31:42 -08002173 return nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174}
2175
Josh Triplett3033f14a2015-06-25 15:01:19 -07002176#ifndef CONFIG_HAVE_COPY_THREAD_TLS
2177/* For compatibility with architectures that call do_fork directly rather than
2178 * using the syscall entry points below. */
2179long do_fork(unsigned long clone_flags,
2180 unsigned long stack_start,
2181 unsigned long stack_size,
2182 int __user *parent_tidptr,
2183 int __user *child_tidptr)
2184{
2185 return _do_fork(clone_flags, stack_start, stack_size,
2186 parent_tidptr, child_tidptr, 0);
2187}
2188#endif
2189
Al Viro2aa3a7f2012-09-21 19:55:31 -04002190/*
2191 * Create a kernel thread.
2192 */
2193pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
2194{
Josh Triplett3033f14a2015-06-25 15:01:19 -07002195 return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
2196 (unsigned long)arg, NULL, NULL, 0);
Al Viro2aa3a7f2012-09-21 19:55:31 -04002197}
Al Viro2aa3a7f2012-09-21 19:55:31 -04002198
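/*
 * Hedged note: most in-kernel callers use the kthread API instead of
 * calling this directly, since it names the thread and supports
 * kthread_stop():
 *
 *	struct task_struct *t = kthread_run(fn, data, "my-worker");
 */
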
Al Virod2125042012-10-23 13:17:59 -04002199#ifdef __ARCH_WANT_SYS_FORK
2200SYSCALL_DEFINE0(fork)
2201{
2202#ifdef CONFIG_MMU
Josh Triplett3033f14a2015-06-25 15:01:19 -07002203 return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
Al Virod2125042012-10-23 13:17:59 -04002204#else
2205	/* fork() cannot be supported in nommu mode */
Daeseok Youn5d59e182014-01-23 15:55:47 -08002206 return -EINVAL;
Al Virod2125042012-10-23 13:17:59 -04002207#endif
2208}
2209#endif
2210
2211#ifdef __ARCH_WANT_SYS_VFORK
2212SYSCALL_DEFINE0(vfork)
2213{
Josh Triplett3033f14a2015-06-25 15:01:19 -07002214 return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
2215 0, NULL, NULL, 0);
Al Virod2125042012-10-23 13:17:59 -04002216}
2217#endif
2218
2219#ifdef __ARCH_WANT_SYS_CLONE
2220#ifdef CONFIG_CLONE_BACKWARDS
2221SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2222 int __user *, parent_tidptr,
Josh Triplett3033f14a2015-06-25 15:01:19 -07002223 unsigned long, tls,
Al Virod2125042012-10-23 13:17:59 -04002224 int __user *, child_tidptr)
2225#elif defined(CONFIG_CLONE_BACKWARDS2)
2226SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
2227 int __user *, parent_tidptr,
2228 int __user *, child_tidptr,
Josh Triplett3033f14a2015-06-25 15:01:19 -07002229 unsigned long, tls)
Michal Simekdfa97712013-08-13 16:00:53 -07002230#elif defined(CONFIG_CLONE_BACKWARDS3)
2231SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
2232 int, stack_size,
2233 int __user *, parent_tidptr,
2234 int __user *, child_tidptr,
Josh Triplett3033f14a2015-06-25 15:01:19 -07002235 unsigned long, tls)
Al Virod2125042012-10-23 13:17:59 -04002236#else
2237SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2238 int __user *, parent_tidptr,
2239 int __user *, child_tidptr,
Josh Triplett3033f14a2015-06-25 15:01:19 -07002240 unsigned long, tls)
Al Virod2125042012-10-23 13:17:59 -04002241#endif
2242{
Josh Triplett3033f14a2015-06-25 15:01:19 -07002243 return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
Al Virod2125042012-10-23 13:17:59 -04002244}
2245#endif
2246
Ravikiran G Thirumalai5fd63b32006-01-11 22:46:15 +01002247#ifndef ARCH_MIN_MMSTRUCT_ALIGN
2248#define ARCH_MIN_MMSTRUCT_ALIGN 0
2249#endif
2250
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002251static void sighand_ctor(void *data)
Oleg Nesterovaa1757f2006-03-28 16:11:12 -08002252{
2253 struct sighand_struct *sighand = data;
2254
Christoph Lametera35afb82007-05-16 22:10:57 -07002255 spin_lock_init(&sighand->siglock);
Davide Libenzib8fceee2007-09-20 12:40:16 -07002256 init_waitqueue_head(&sighand->signalfd_wqh);
Oleg Nesterovaa1757f2006-03-28 16:11:12 -08002257}
2258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259void __init proc_caches_init(void)
2260{
2261 sighand_cachep = kmem_cache_create("sighand_cache",
2262 sizeof(struct sighand_struct), 0,
Vegard Nossum2dff4402008-05-31 15:56:17 +02002263 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
Vladimir Davydov5d097052016-01-14 15:18:21 -08002264 SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 signal_cachep = kmem_cache_create("signal_cache",
2266 sizeof(struct signal_struct), 0,
Vladimir Davydov5d097052016-01-14 15:18:21 -08002267 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2268 NULL);
Paul Mundt20c2df82007-07-20 10:11:58 +09002269 files_cachep = kmem_cache_create("files_cache",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 sizeof(struct files_struct), 0,
Vladimir Davydov5d097052016-01-14 15:18:21 -08002271 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2272 NULL);
Paul Mundt20c2df82007-07-20 10:11:58 +09002273 fs_cachep = kmem_cache_create("fs_cache",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 sizeof(struct fs_struct), 0,
Vladimir Davydov5d097052016-01-14 15:18:21 -08002275 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2276 NULL);
Linus Torvalds6345d242011-05-29 11:32:28 -07002277 /*
2278 * FIXME! The "sizeof(struct mm_struct)" currently includes the
2279 * whole struct cpumask for the OFFSTACK case. We could change
2280 * this to *only* allocate as much of it as required by the
2281	 * maximum number of CPUs we can ever have. The cpumask_allocation
2282 * is at the end of the structure, exactly for that reason.
2283 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 mm_cachep = kmem_cache_create("mm_struct",
Ravikiran G Thirumalai5fd63b32006-01-11 22:46:15 +01002285 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
Vladimir Davydov5d097052016-01-14 15:18:21 -08002286 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2287 NULL);
2288 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
David Howells8feae132009-01-08 12:04:47 +00002289 mmap_init();
Al Viro66577192011-06-28 15:41:10 -04002290 nsproxy_cache_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291}
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002292
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002293/*
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002294 * Check constraints on flags passed to the unshare system call.
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002295 */
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002296static int check_unshare_flags(unsigned long unshare_flags)
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002297{
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002298 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
2299 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
Eric W. Biederman50804fe2010-03-02 15:41:50 -08002300 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
Aditya Kalia79a9082016-01-29 02:54:06 -06002301 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002302 return -EINVAL;
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002303 /*
Eric W. Biederman12c641a2015-08-10 17:35:07 -05002304 * Not implemented, but pretend it works if there is nothing
2305 * to unshare. Note that unsharing the address space or the
2306	 * signal handlers also requires unsharing the signal queues (aka
2307 * CLONE_THREAD).
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002308 */
2309 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
Eric W. Biederman12c641a2015-08-10 17:35:07 -05002310 if (!thread_group_empty(current))
2311 return -EINVAL;
2312 }
2313 if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
2314 if (atomic_read(&current->sighand->count) > 1)
2315 return -EINVAL;
2316 }
2317 if (unshare_flags & CLONE_VM) {
2318 if (!current_is_single_threaded())
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002319 return -EINVAL;
2320 }
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002321
2322 return 0;
2323}
2324
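/*
 * Hedged example of the constraints above: in a multi-threaded process
 *
 *	unshare(CLONE_VM);	// fails with -EINVAL
 *
 * is rejected because the address space cannot be unshared while other
 * threads are still using it; the same call from a single-threaded
 * task trivially succeeds.
 */
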
2325/*
JANAK DESAI99d14192006-02-07 12:58:59 -08002326 * Unshare the filesystem structure if it is being shared
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002327 */
2328static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
2329{
2330 struct fs_struct *fs = current->fs;
2331
Al Viro498052b2009-03-30 07:20:30 -04002332 if (!(unshare_flags & CLONE_FS) || !fs)
2333 return 0;
2334
2335	/* don't need the lock here; in the worst case we'll do a useless copy */
2336 if (fs->users == 1)
2337 return 0;
2338
2339 *new_fsp = copy_fs_struct(fs);
2340 if (!*new_fsp)
2341 return -ENOMEM;
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002342
2343 return 0;
2344}
2345
2346/*
JANAK DESAIa016f332006-02-07 12:59:02 -08002347 * Unshare file descriptor table if it is being shared
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002348 */
2349static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
2350{
2351 struct files_struct *fd = current->files;
JANAK DESAIa016f332006-02-07 12:59:02 -08002352 int error = 0;
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002353
2354 if ((unshare_flags & CLONE_FILES) &&
JANAK DESAIa016f332006-02-07 12:59:02 -08002355 (fd && atomic_read(&fd->count) > 1)) {
2356 *new_fdp = dup_fd(fd, &error);
2357 if (!*new_fdp)
2358 return error;
2359 }
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002360
2361 return 0;
2362}
2363
2364/*
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002365 * unshare allows a process to 'unshare' part of the process
2366 * context which was originally shared using clone. copy_*
2367 * functions used by do_fork() cannot be used here directly
2368 * because they modify an inactive task_struct that is being
2369 * constructed. Here we are modifying the current, active,
2370 * task_struct.
2371 */
Heiko Carstens6559eed82009-01-14 14:14:32 +01002372SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002373{
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002374 struct fs_struct *fs, *new_fs = NULL;
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002375 struct files_struct *fd, *new_fd = NULL;
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -07002376 struct cred *new_cred = NULL;
Pavel Emelyanovcf7b7082007-10-18 23:39:54 -07002377 struct nsproxy *new_nsproxy = NULL;
Manfred Spraul9edff4a2008-04-29 01:00:57 -07002378 int do_sysvsem = 0;
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002379 int err;
JANAK DESAIcf2e3402006-02-07 12:58:58 -08002380
Eric W. Biederman50804fe2010-03-02 15:41:50 -08002381 /*
Eric W. Biedermanfaf00da2015-08-10 18:25:44 -05002382	 * If unsharing a user namespace, we must also unshare the thread group
2383 * and unshare the filesystem root and working directories.
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -07002384 */
2385 if (unshare_flags & CLONE_NEWUSER)
Eric W. Biedermane66eded2013-03-13 11:51:49 -07002386 unshare_flags |= CLONE_THREAD | CLONE_FS;
Eric W. Biedermanb2e0d9872012-07-26 05:15:35 -07002387 /*
Eric W. Biederman50804fe2010-03-02 15:41:50 -08002388 * If unsharing vm, must also unshare signal handlers.
2389 */
2390 if (unshare_flags & CLONE_VM)
2391 unshare_flags |= CLONE_SIGHAND;
Manfred Spraul6013f672008-04-29 01:00:59 -07002392 /*
Eric W. Biederman12c641a2015-08-10 17:35:07 -05002393 * If unsharing a signal handlers, must also unshare the signal queues.
2394 */
2395 if (unshare_flags & CLONE_SIGHAND)
2396 unshare_flags |= CLONE_THREAD;
2397 /*
Oleg Nesterov9bfb23f2011-03-22 16:34:09 -07002398 * If unsharing namespace, must also unshare filesystem information.
2399 */
2400 if (unshare_flags & CLONE_NEWNS)
2401 unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit() as far
			 * as the semaphore undo lists are concerned.
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in the old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

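		/*
		 * Swap in the preallocated structures under task_lock.
		 * Whatever gets displaced (or turns out to be unused) is
		 * handed back through new_fs/new_fd so that the
		 * fall-through cleanup below releases it.
		 */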
		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}
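
	/*
	 * Fall through: on success the labels below release whichever
	 * preallocated (or displaced) structures are still pending.
	 */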
bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
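
/*
 * A minimal userspace sketch (illustrative, not part of this file) of
 * calling the syscall above.  It assumes glibc's unshare() wrapper and
 * a kernel built with CONFIG_NAMESPACES; CLONE_NEWNS normally requires
 * CAP_SYS_ADMIN.  Note that the caller passes only CLONE_NEWNS -- the
 * CLONE_FS implication is applied by the flag fix-ups above, not by
 * userspace.
 */
#if 0	/* example only -- userspace code, not compiled with fork.c */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Detach into a private mount namespace. */
	if (unshare(CLONE_NEWNS) == -1) {
		perror("unshare(CLONE_NEWNS)");
		return 1;
	}
	/* Mounts made from here on are private to this namespace,
	 * subject to mount propagation settings. */
	printf("now in a private mount namespace\n");
	return 0;
}
#endif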

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
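
/*
 * A minimal sketch (assumed, not lifted from the exec code) of how a
 * caller is expected to use unshare_files(): swap in a private table,
 * do the sensitive work, then drop the reference on whatever table was
 * displaced.
 */
#if 0	/* example only */
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		return retval;
	/* ...work with a file descriptor table private to this task... */
	if (displaced)
		put_files_struct(displaced);
#endif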

int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	/*
	 * Operate on a local copy of the table so that
	 * proc_dointvec_minmax() rejects writes outside [1, MAX_THREADS]
	 * before max_threads is updated.
	 */
	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
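
/*
 * A minimal sketch of how a handler like the one above is wired into a
 * ctl_table.  The real "threads-max" entry lives in kernel/sysctl.c;
 * the table name below is made up for illustration.
 */
#if 0	/* example only */
static struct ctl_table example_kern_table[] = {
	{
		.procname	= "threads-max",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_max_threads,
	},
	{ }
};
#endif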