#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in the .rodata section,
 * otherwise it falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
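
/*
 * Usage sketch (hypothetical names): kstrdup_const()/kfree_const() let
 * callers that are often handed string literals avoid the copy:
 *
 *	static int set_name(struct widget *w, const char *name)
 *	{
 *		w->name = kstrdup_const(name, GFP_KERNEL);
 *		return w->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void free_name(struct widget *w)
 *	{
 *		kfree_const(w->name);
 *	}
 */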

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
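
/*
 * Usage sketch (hypothetical names): kmemdup() is the idiomatic way to
 * take a private copy of a caller-supplied structure:
 *
 *	new_conf = kmemdup(conf, sizeof(*conf), GFP_KERNEL);
 *	if (!new_conf)
 *		return -ENOMEM;
 */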

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
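
/*
 * Usage sketch (hypothetical names): a typical caller duplicates a
 * fixed-size argument block passed to an ioctl:
 *
 *	struct foo_args *args = memdup_user(uarg, sizeof(*args));
 *
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 */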

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
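
/*
 * Usage sketch (hypothetical names): copy a user-supplied string with
 * an explicit upper bound; the result is always NUL-terminated:
 *
 *	name = strndup_user(uname, PATH_MAX);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */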

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
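
/*
 * Usage sketch (hypothetical names): memdup_user_nul() suits write()
 * handlers that parse a short text command, since the appended NUL
 * makes the buffer safe for string functions:
 *
 *	kbuf = memdup_user_nul(ubuf, count);
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = parse_command(kbuf);
 *	kfree(kbuf);
 */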

/*
 * Link @vma into @mm's VMA list after @prev; the successor is taken
 * from @prev or, failing that, from the rbtree node @rb_parent.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
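
/*
 * Usage sketch (hypothetical names): pin a user buffer for direct
 * access, then drop the page references; error handling abbreviated:
 *
 *	nr = get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *	...
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */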

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
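
/*
 * Usage sketch (hypothetical names): an in-kernel caller maps a file
 * into the current task much like userspace mmap(); on error the
 * returned value is a negative errno encoded in the unsigned long:
 *
 *	addr = vm_mmap(filp, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */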

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
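
/*
 * Usage sketch (hypothetical helper): kvfree() pairs with allocations
 * that may come from either allocator, e.g. a size-dependent fallback:
 *
 *	static void *table_alloc(size_t size)
 *	{
 *		void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *		return p ? p : vmalloc(size);
 *	}
 *	...
 *	kvfree(table);
 */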

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}
352
Andrew Morton1aa8aea2016-05-19 17:12:00 -0700353/*
354 * Return true if this page is mapped into pagetables.
355 * For compound page it returns true if any subpage of compound page is mapped.
356 */
357bool page_mapped(struct page *page)
358{
359 int i;
360
361 if (likely(!PageCompound(page)))
362 return atomic_read(&page->_mapcount) >= 0;
363 page = compound_head(page);
364 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
365 return true;
366 if (PageHuge(page))
367 return false;
368 for (i = 0; i < hpage_nr_pages(page); i++) {
369 if (atomic_read(&page[i]._mapcount) >= 0)
370 return true;
371 }
372 return false;
373}
374EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
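
/*
 * Worked example (illustrative numbers): with 4 GiB of RAM, no huge
 * pages, the default overcommit_ratio of 50 and 2 GiB of swap, the
 * limit is 4 GiB * 50% + 2 GiB = 4 GiB worth of pages. A non-zero
 * overcommit_kbytes replaces the ratio-based term with an absolute
 * value.
 */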

/*
 * Make sure vm_committed_as is in one cacheline and does not share a
 * cacheline with other variables. It can be updated by several CPUs
 * frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
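
/*
 * Usage sketch (hypothetical buffer): callers that need a printable
 * string must terminate it themselves, since get_cmdline() does not:
 *
 *	len = get_cmdline(task, buf, sizeof(buf) - 1);
 *	buf[len] = '\0';
 */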