/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
				    unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
				    unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			  unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
				 unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
				 unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
				     unsigned long start, unsigned long end, int node)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}

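/*
 * Illustrative sketch (editor's addition, not in the original file):
 * a typical caller reserves a virtual range with get_vm_area() and
 * releases it again with remove_vm_area().  All identifiers other
 * than the vmalloc API itself are hypothetical:
 *
 *	struct vm_struct *area = get_vm_area(len, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	... establish the mapping at area->addr ...
 *	kfree(remove_vm_area(area->addr));
 */
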
/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

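/*
 * Illustrative sketch (editor's addition, not in the original file):
 * mapping two already-allocated pages into one virtually contiguous
 * range and tearing the mapping down again; error handling for the
 * page allocations is elided:
 *
 *	struct page *pages[2];
 *	void *va;
 *
 *	pages[0] = alloc_page(GFP_KERNEL);
 *	pages[1] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		... use the 2 * PAGE_SIZE bytes at va ...
 *		vunmap(va);	(releases the mapping, not the pages)
 *	}
 *	__free_page(pages[0]);
 *	__free_page(pages[1]);
 */
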
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
			  pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else
		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

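/*
 * Illustrative sketch (editor's addition, not in the original file):
 * __vmalloc() is the entry point for callers that need non-default
 * gfp flags or page protections, e.g. pre-zeroed pages:
 *
 *	buf = __vmalloc(len, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
 */
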
/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

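/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the common pattern for large buffers that only need to be
 * *virtually* contiguous:
 *
 *	void *buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	vfree(buf);	(must not run in interrupt context)
 */
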
/**
 * vmalloc_user - allocate virtually contiguous memory which has
 *		  been zeroed so it can be mapped to userspace without
 *		  leaking data.
 *
 * @size: allocation size
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		/* Only mark the area if the allocation actually succeeded. */
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 *
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

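/*
 * Illustrative sketch (editor's addition, not in the original file):
 * placing per-node data near the CPUs that use it; table[] is a
 * hypothetical array of per-node pointers:
 *
 *	for_each_online_node(nid)
 *		table[nid] = vmalloc_node(size, nid);
 */
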
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

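/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the module loader is the classic caller; it obtains space for code
 * with vmalloc_exec() and later releases it with vfree().  Flushing
 * the instruction cache after copying in the code is the caller's
 * job on architectures that need it:
 *
 *	code = vmalloc_exec(code_size);
 *	... copy instructions to code ...
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)code + code_size);
 */
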
/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit physically addressable pages to cover @size from
 * the page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

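/*
 * Illustrative sketch (editor's addition, not in the original file):
 * a driver whose hardware can only address 32 bits of physical memory
 * might allocate a large descriptor ring this way; struct my_desc is
 * a hypothetical type:
 *
 *	ring = vmalloc_32(nr_desc * sizeof(struct my_desc));
 */
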
/**
 * vmalloc_32_user - allocate virtually contiguous memory (32bit
 *		     addressable) which is zeroed so it can be
 *		     mapped to userspace without leaking data.
 *
 * @size: allocation size
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		/* Only mark the area if the allocation actually succeeded. */
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

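/*
 * Illustrative sketch (editor's addition, not in the original file):
 * readers of kernel virtual memory such as /dev/kmem copy out of the
 * vmalloc region with vread(); bytes that fall between vm areas are
 * returned as zeroes rather than faulting:
 *
 *	kbuf = kmalloc(count, GFP_KERNEL);
 *	copied = vread(kbuf, (char *)addr, count);
 */
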
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 *
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma.  Returns failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);

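/*
 * Illustrative sketch (editor's addition, not in the original file):
 * a driver shares a kernel buffer with userspace by allocating it
 * with vmalloc_user() and wiring it up from its ->mmap() handler;
 * my_mmap() and buf are hypothetical:
 *
 *	static void *buf;	(buf = vmalloc_user(BUF_SIZE); elsewhere)
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */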