/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                            int node, void *caller);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
                                    unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
                                    unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                vunmap_pmd_range(pud, addr, next);
        } while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long end = addr + size;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        flush_cache_vunmap(addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                vunmap_pud_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
        unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
                          unsigned long end, pgprot_t prot, struct page ***pages)
{
        pte_t *pte;

        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = **pages;
                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*pages)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
                                 unsigned long end, pgprot_t prot, struct page ***pages)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);
                if (vmap_pte_range(pmd, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
                                 unsigned long end, pgprot_t prot, struct page ***pages)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);
                if (vmap_pmd_range(pud, addr, next, prot, pages))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long addr = (unsigned long) area->addr;
        unsigned long end = addr + area->size - PAGE_SIZE;
        int err;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_pud_range(pgd, addr, next, prot, pages);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

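/*
 * Usage note (illustrative sketch, not from this file; my_pages/nr are
 * hypothetical): map_vm_area() consumes pages from the caller's cursor
 * and advances it, so mapping one area in several calls must reuse the
 * same cursor:
 *
 *      struct vm_struct *area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
 *      struct page **cursor = my_pages;
 *
 *      if (area && map_vm_area(area, PAGE_KERNEL, &cursor))
 *              vunmap(area->addr);     (tears down the area on failure)
 */
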
static inline int is_vmalloc_or_module_addr(const void *x)
{
        /*
         * x86-64 and sparc64 put modules in a special place,
         * and fall back on vmalloc() if that fails. Others
         * just put it in the vmalloc space.
         */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
        unsigned long addr = (unsigned long)x;
        if (addr >= MODULES_VADDR && addr < MODULES_END)
                return 1;
#endif
        return is_vmalloc_addr(x);
}

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
        unsigned long addr = (unsigned long) vmalloc_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        /*
         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
         * architectures that do not vmalloc module space
         */
        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        page = pte_page(pte);
                                pte_unmap(ptep);
                        }
                }
        }
        return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

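/*
 * Illustrative sketch (buf/sg/i are hypothetical): drivers use
 * vmalloc_to_page() to hand single pages of a vmalloc area to interfaces
 * that want a struct page, e.g. when building a scatterlist:
 *
 *      struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);
 *      sg_set_page(&sg[i], pg, PAGE_SIZE, 0);
 *
 * sg_set_page() is just one typical consumer; any struct page user works
 * the same way.
 */
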
static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
                   unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        unsigned long addr;

        BUG_ON(in_interrupt());
        if (flags & VM_IOREMAP) {
                int bit = fls(size);

                if (bit > IOREMAP_MAX_ORDER)
                        bit = IOREMAP_MAX_ORDER;
                else if (bit < PAGE_SHIFT)
                        bit = PAGE_SHIFT;

                align = 1ul << bit;
        }
        addr = ALIGN(start, align);
        size = PAGE_ALIGN(size);
        if (unlikely(!size))
                return NULL;

        area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr) {
                        if ((unsigned long)tmp->addr + tmp->size >= addr)
                                addr = ALIGN(tmp->size +
                                             (unsigned long)tmp->addr, align);
                        continue;
                }
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
                if (addr > end - size)
                        goto out;
        }
        if ((size + addr) < addr)
                goto out;
        if (addr > end - size)
                goto out;

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        area->caller = caller;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        if (printk_ratelimit())
                printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
        return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
                                  __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
                                  -1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                     void *caller)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
                                  -1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                   int node, gfp_t gfp_mask)
{
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
                                  gfp_mask, __builtin_return_address(0));
}

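/*
 * Illustrative sketch (hypothetical, error handling elided): ioremap()
 * implementations are the typical users of this family -- reserve the
 * address range first, then fill in the mapping:
 *
 *      struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *
 *      if (area)
 *              ioremap_page_range((unsigned long)area->addr,
 *                                 (unsigned long)area->addr + size,
 *                                 phys_addr, PAGE_KERNEL_NOCACHE);
 *
 * PAGE_KERNEL_NOCACHE is an arch-specific choice shown for illustration.
 */
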
/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
        struct vm_struct *tmp;

        for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
                if (tmp->addr == addr)
                        break;
        }

        return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
        struct vm_struct **p, *tmp;

        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;

        /*
         * Remove the guard page.
         */
        tmp->size -= PAGE_SIZE;
        return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
        struct vm_struct *v;
        write_lock(&vmlist_lock);
        v = __remove_vm_area(addr);
        write_unlock(&vmlist_lock);
        return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                return;
        }

        debug_check_no_locks_freed(addr, area->size);
        debug_check_no_obj_freed(addr, area->size);

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        struct page *page = area->pages[i];

                        BUG_ON(!page);
                        __free_page(page);
                }

                if (area->flags & VM_VPAGES)
                        vfree(area->pages);
                else
                        kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;

        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}
EXPORT_SYMBOL(vmap);

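/*
 * Illustrative sketch (hypothetical caller, GFP_KERNEL context, error
 * handling elided): the classic vmap() pattern gives scattered pages one
 * contiguous kernel virtual window:
 *
 *      struct page *pages[16];
 *      void *addr;
 *      int i;
 *
 *      for (i = 0; i < 16; i++)
 *              pages[i] = alloc_page(GFP_KERNEL);
 *      addr = vmap(pages, 16, VM_MAP, PAGE_KERNEL);
 *      ...
 *      vunmap(addr);   (the pages themselves must still be freed)
 */
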
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 pgprot_t prot, int node, void *caller)
{
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
                                       PAGE_KERNEL, node, caller);
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size,
                                     (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
                                     node);
        }
        area->pages = pages;
        area->caller = caller;
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }

        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;

                if (node < 0)
                        page = alloc_page(gfp_mask);
                else
                        page = alloc_pages_node(node, gfp_mask, 0);

                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
                area->pages[i] = page;
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_area_node(area, gfp_mask, prot, -1,
                                   __builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
                            int node, void *caller)
{
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
                                  node, gfp_mask, caller);

        if (!area)
                return NULL;

        return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
        return __vmalloc_node(size, gfp_mask, prot, -1,
                              __builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags,
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                              -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

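/*
 * Illustrative sketch (not from this file; len is hypothetical): the
 * common pattern when physically contiguous memory is not required:
 *
 *      void *buf = vmalloc(len);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      vfree(buf);
 *
 * The buffer is only virtually contiguous, so it must not be handed to
 * DMA engines directly.
 */
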
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_user);

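/*
 * Usage note (illustrative, not from this file): vmalloc_user() pairs
 * with remap_vmalloc_range() below -- allocate a zeroed, VM_USERMAP
 * flagged buffer here, then hand it to userspace from a driver's
 * ->mmap() handler; see the sketch after remap_vmalloc_range() for
 * that side.
 */
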
/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags,
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                              node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

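/*
 * Illustrative sketch (table/slice_size are hypothetical): large per-node
 * tables typically allocate each node's slice on that node:
 *
 *      for_each_online_node(node)
 *              table[node] = vmalloc_node(slice_size, node);
 *
 * Each slice is still released with plain vfree().
 */
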
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags,
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

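/*
 * Note (added commentary): GFP_VMALLOC32 only constrains where the
 * backing pages may come from; the address returned by vmalloc_32() is
 * still an ordinary vmalloc address.  A device doing DMA must therefore
 * be given page/bus addresses (e.g. via vmalloc_to_page()), never the
 * vmalloc pointer itself.
 */
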
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

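/*
 * Illustrative sketch (hypothetical caller): vread()/vwrite() walk the
 * vmlist, reading holes back as '\0' (vread) and skipping them (vwrite);
 * historically /dev/kmem is the main user.  A bounded read might look
 * like:
 *
 *      char kbuf[64];
 *      long n = vread(kbuf, (char *)some_vmalloc_addr, sizeof(kbuf));
 *
 * some_vmalloc_addr is hypothetical; n is how many bytes landed in kbuf.
 */
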
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        struct vm_struct *area;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int ret;

        if ((PAGE_SIZE-1) & (unsigned long)addr)
                return -EINVAL;

        read_lock(&vmlist_lock);
        area = __find_vm_area(addr);
        if (!area)
                goto out_einval_locked;

        if (!(area->flags & VM_USERMAP))
                goto out_einval_locked;

        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
                goto out_einval_locked;
        read_unlock(&vmlist_lock);

        addr += pgoff << PAGE_SHIFT;
        do {
                struct page *page = vmalloc_to_page(addr);
                ret = vm_insert_page(vma, uaddr, page);
                if (ret)
                        return ret;

                uaddr += PAGE_SIZE;
                addr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /* Prevent "things" like memory migration? VM_flags need a cleanup... */
        vma->vm_flags |= VM_RESERVED;

        return ret;

out_einval_locked:
        read_unlock(&vmlist_lock);
        return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);

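/*
 * Illustrative sketch (hypothetical driver, error handling elided): the
 * usual pairing with vmalloc_user() from a file_operations ->mmap():
 *
 *      static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return remap_vmalloc_range(vma, my_buf, 0);
 *      }
 *
 * my_buf must come from vmalloc_user()/vmalloc_32_user() so that
 * VM_USERMAP is set and the memory is pre-zeroed.
 */
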
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
        /* apply_to_page_range() does all the hard work. */
        return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
        struct vm_struct *area;

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (area == NULL)
                return NULL;

        /*
         * This ensures that page tables are constructed for this region
         * of kernel virtual address space and mapped into init_mm.
         */
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                area->size, f, NULL)) {
                free_vm_area(area);
                return NULL;
        }

        /* Make sure the pagetables are constructed in process kernel
           mappings */
        vmalloc_sync_all();

        return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
        struct vm_struct *ret;
        ret = remove_vm_area(area->addr);
        BUG_ON(ret != area);
        kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

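/*
 * Illustrative sketch (hypothetical caller): alloc_vm_area() suits code,
 * e.g. paravirtualized guests, that wants kernel address space with page
 * tables already constructed but installs the actual ptes by other means:
 *
 *      struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
 *
 *      if (!area)
 *              return -ENOMEM;
 *      ... have the hypervisor map something at area->addr ...
 *      free_vm_area(area);
 */
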
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;
        struct vm_struct *v;

        read_lock(&vmlist_lock);
        v = vmlist;
        while (n > 0 && v) {
                n--;
                v = v->next;
        }
        if (!n)
                return v;

        return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct vm_struct *v = p;

        ++*pos;
        return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
        read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
        if (NUMA_BUILD) {
                unsigned int nr, *counters = m->private;

                if (!counters)
                        return;

                memset(counters, 0, nr_node_ids * sizeof(unsigned int));

                for (nr = 0; nr < v->nr_pages; nr++)
                        counters[page_to_nid(v->pages[nr])]++;

                for_each_node_state(nr, N_HIGH_MEMORY)
                        if (counters[nr])
                                seq_printf(m, " N%u=%u", nr, counters[nr]);
        }
}

static int s_show(struct seq_file *m, void *p)
{
        struct vm_struct *v = p;

        seq_printf(m, "0x%p-0x%p %7ld",
                   v->addr, v->addr + v->size, v->size);

        if (v->caller) {
                char buff[2 * KSYM_NAME_LEN];

                seq_putc(m, ' ');
                sprint_symbol(buff, (unsigned long)v->caller);
                seq_puts(m, buff);
        }

        if (v->nr_pages)
                seq_printf(m, " pages=%d", v->nr_pages);

        if (v->phys_addr)
                seq_printf(m, " phys=%lx", v->phys_addr);

        if (v->flags & VM_IOREMAP)
                seq_printf(m, " ioremap");

        if (v->flags & VM_ALLOC)
                seq_printf(m, " vmalloc");

        if (v->flags & VM_MAP)
                seq_printf(m, " vmap");

        if (v->flags & VM_USERMAP)
                seq_printf(m, " user");

        if (v->flags & VM_VPAGES)
                seq_printf(m, " vpages");

        show_numa_info(m, v);
        seq_putc(m, '\n');
        return 0;
}

const struct seq_operations vmalloc_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};
#endif