/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
				    unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
				    unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			  unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
				 unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
				 unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

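/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that already holds an array of pages typically pairs get_vm_area()
 * with map_vm_area(), much as vmap() below does.  The identifiers
 * "my_pages" and "nr" are hypothetical:
 *
 *	struct page **pages = my_pages;
 *	struct vm_struct *area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
 *
 *	if (area && map_vm_area(area, PAGE_KERNEL, &pages))
 *		vunmap(area->addr);
 *
 * Note that map_vm_area() advances the pages cursor as it maps, which
 * is why the array pointer is passed by reference.
 */
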
/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
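
/*
 * Illustrative sketch (not in the original file): these two helpers are
 * handy when a vmalloc()ed buffer must be described to an interface that
 * works in struct page or pfn units.  "buf" and "off" are hypothetical:
 *
 *	struct page *page = vmalloc_to_page(buf + off);
 *	unsigned long pfn = vmalloc_to_pfn(buf + off);
 */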

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - "
		       "use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
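
/*
 * Usage sketch (illustrative, not from the original source): a driver
 * that already holds an array of pages can use vmap() to get a single
 * linear kernel view of them.  "pages" and "npages" are hypothetical:
 *
 *	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		... access the buffer through va ...
 *		vunmap(va);
 *	}
 */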

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
			  pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				       PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				     (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				     node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
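
/*
 * Usage sketch (illustrative only): vmalloc() is the usual choice for
 * large allocations that need to be virtually but not physically
 * contiguous.  "len" is a hypothetical size:
 *
 *	void *buf = vmalloc(len);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	vfree(buf);
 */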

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma.  Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
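
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that shares a vmalloc()ed buffer with userspace allocates it with
 * vmalloc_user() and calls remap_vmalloc_range() from its ->mmap()
 * handler.  "my_buf" is a hypothetical driver-private buffer:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
 *	}
 */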

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
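
/*
 * Usage sketch (illustrative only): alloc_vm_area()/free_vm_area() are
 * intended for callers, such as paravirtualized guests, that want a
 * chunk of kernel address space with page tables already populated so
 * that a hypervisor can later install mappings into it:
 *
 *	struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);
 *
 *	if (!vm)
 *		return -ENOMEM;
 *	... hand vm->addr to the hypervisor ...
 *	free_vm_area(vm);
 */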