#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
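
/*
 * Usage sketch ("name" and the surrounding error handling are
 * hypothetical): duplicate a caller-supplied string so it outlives the
 * caller's buffer, and release it with kfree() when done.
 *
 *	char *label = kstrdup(name, GFP_KERNEL);
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */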

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
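
/*
 * Usage sketch ("input" is a hypothetical, possibly unterminated
 * buffer): bound the copy so at most 16 characters are read, with a NUL
 * always appended by kstrndup().
 *
 *	char *tag = kstrndup(input, 16, GFP_KERNEL);
 *	if (!tag)
 *		return -ENOMEM;
 */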

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
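
/*
 * Usage sketch ("desc" and struct foo_desc are hypothetical): duplicate
 * a fixed-size object before handing the copy to another layer.
 *
 *	struct foo_desc *copy = kmemdup(desc, sizeof(*desc), GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 */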

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
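
/*
 * Usage sketch ("uarg" and "len" are hypothetical ioctl arguments):
 * note the ERR_PTR()/PTR_ERR() convention rather than a NULL check.
 *
 *	void *buf = memdup_user(uarg, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */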

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, for example when the old buffer may still be visible to
 * other users, as with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}
EXPORT_SYMBOL(__krealloc);
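
/*
 * Usage sketch (all identifiers hypothetical): grow an RCU-visible
 * buffer without freeing the old one, since readers may still hold a
 * reference; the old buffer is reclaimed only after a grace period.
 *
 *	new = __krealloc(old, new_size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	rcu_assign_pointer(tbl, new);
 *	if (new != old)
 *		call_rcu(&old->rcu, free_old_cb);
 */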

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
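
/*
 * Usage sketch ("buf", "tmp" and "new_len" are hypothetical): on
 * failure krealloc() returns NULL and leaves the old allocation intact,
 * so the result must not be assigned over the only copy of the old
 * pointer.
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */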

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested buffer size passed to kmalloc().
 * So be careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
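
/*
 * Usage sketch ("key" and "key_len" are hypothetical): clear sensitive
 * material on free so it does not linger in recycled slab memory.
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 */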

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns an ERR_PTR() on failure.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
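
/*
 * Usage sketch ("uname" is a hypothetical user pointer): copy a short,
 * NUL-terminated string from userspace with an explicit upper bound.
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */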

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check in the entire thread group, otherwise
 * just check the current task. Returns the pid of the task that the vma
 * is a stack for, or 0 if it is not.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;
		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}
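
/*
 * Usage sketch (the seq_file context "m" is hypothetical): a /proc-style
 * consumer can use the returned pid to annotate a thread's stack vma.
 *
 *	pid_t tid = vm_is_stack(task, vma, 1);
 *	if (tid)
 *		seq_printf(m, "[stack:%d]", tid);
 */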

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
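
/*
 * Usage sketch ("addr" is a hypothetical user address): from a context
 * that cannot sleep, attempt a fast pin and treat anything short of
 * success as "not pinned".
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(addr, 1, 0, &page) != 1)
 *		return -EFAULT;
 */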

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
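
/*
 * Usage sketch (identifiers hypothetical): pin a user buffer for I/O.
 * A short positive return means only the first "ret" pages were pinned,
 * and releasing them remains the caller's responsibility.
 *
 *	ret = get_user_pages_fast(start, nr_pages, 1, pages);
 *	if (ret < 0)
 *		return ret;
 *	if (ret < nr_pages)
 *		goto release_partial;
 */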

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
		up_write(&mm->mmap_sem);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
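
/*
 * Usage sketch ("file" and "size" are hypothetical): map a file the way
 * an in-kernel user such as a binfmt loader might. Errors come back as
 * -errno encoded in the address, so check with IS_ERR_VALUE().
 *
 *	unsigned long addr = vm_mmap(file, 0, size,
 *				     PROT_READ | PROT_WRITE,
 *				     MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */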

/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);