/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/*
 * Unmap the kernel page tables covering a vm_struct's address range and
 * flush the TLB.  The pages themselves and the vm_struct are not freed.
 */
void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Map the caller's pages into the area's address range, advancing the
 * caller's page pointer as each page is consumed.  The area's final page
 * is left unmapped as a guard page.
 */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page at the end of the area, so that
	 * accesses running off the end of the mapping fault instead of
	 * silently hitting the next area.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

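/*
 * Illustrative sketch (not part of this file): a typical caller reserves a
 * range and then maps its own pages into it, much as vmap() below does.
 * "my_pages" and "npages" are hypothetical caller-owned values; note that
 * map_vm_area() advances the page pointer it is given.
 *
 *	struct vm_struct *area;
 *	struct page **p = my_pages;
 *
 *	area = get_vm_area(npages << PAGE_SHIFT, VM_ALLOC);
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, &p)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return area->addr;
 */
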
/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		 if (tmp->addr == addr)
			 goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for reading its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

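/*
 * Illustrative sketch (not part of this file): the usual pairing, as done by
 * __vunmap() below, is to detach the area and then free the descriptor:
 *
 *	struct vm_struct *vm = remove_vm_area(addr);
 *
 *	if (vm)
 *		kfree(vm);
 */
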
void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 * May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

EXPORT_SYMBOL(vmap);

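/*
 * Illustrative sketch (not part of this file; "n" and the flags value of 0
 * are hypothetical, error handling omitted): map n individually allocated
 * pages into one virtually contiguous range, then drop the mapping without
 * freeing the pages.
 *
 *	struct page *pages[n];
 *	void *virt;
 *	int i;
 *
 *	for (i = 0; i < n; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	virt = vmap(pages, n, 0, PAGE_KERNEL);
 *	if (virt) {
 *		...
 *		vunmap(virt);
 *	}
 *	for (i = 0; i < n; i++)
 *		__free_page(pages[i]);
 */
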
void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
	else
		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	return __vmalloc_area(area, gfp_mask, prot);
}

EXPORT_SYMBOL(__vmalloc);

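/*
 * Illustrative sketch (not part of this file): a caller that cannot use
 * highmem pages simply drops __GFP_HIGHMEM and keeps the default page
 * protections, which is exactly what vmalloc_32() below does:
 *
 *	buf = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 */
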
/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

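/*
 * Illustrative sketch (not part of this file; "nbytes" is a hypothetical
 * caller-chosen size): the usual pattern for a large, virtually contiguous
 * buffer.
 *
 *	void *buf = vmalloc(nbytes);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */
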
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32-bit physically addressable pages to cover @size from
 * the page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

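/*
 * vread()/vwrite() copy data out of / into the vmalloc area on behalf of
 * readers and writers of kernel virtual memory, such as the /dev/kmem
 * driver.  Holes between vm areas are zero-filled on read and skipped on
 * write; both return how far the caller's buffer pointer advanced.
 */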
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}