#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *	struct vm_struct {
 *		struct vmregion	region;
 *		unsigned long	flags;
 *		struct page	**pages;
 *		unsigned int	nr_pages;
 *		unsigned long	phys_addr;
 *	};
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *	struct vmregion vmalloc_head = {
 *		.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *		.vm_start	= VMALLOC_START,
 *		.vm_end		= VMALLOC_END,
 *	};
 *
 * However, vmalloc_head.vm_start is variable (typically, it depends on the
 * amount of RAM found at boot time), so I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */
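
/*
 * A minimal usage sketch, modelled on how the DMA consistent-mapping
 * code drives this API.  VMREGION_START and VMREGION_END are
 * hypothetical placeholder constants, not part of this file:
 *
 *	static struct arm_vmregion_head example_head = {
 *		.vm_lock	= __SPIN_LOCK_UNLOCKED(example_head.vm_lock),
 *		.vm_list	= LIST_HEAD_INIT(example_head.vm_list),
 *		.vm_start	= VMREGION_START,
 *		.vm_end		= VMREGION_END,
 *	};
 *
 *	struct arm_vmregion *c;
 *
 *	c = arm_vmregion_alloc(&example_head, PAGE_SIZE, size, GFP_KERNEL);
 *	if (!c)
 *		return -ENOMEM;
 *	// ... use [c->vm_start, c->vm_end) ...
 *	arm_vmregion_free(&example_head, c);
 */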
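
/*
 * Allocate a region: scan downwards from the top of the area for the
 * first gap that can hold an aligned allocation of @size bytes.
 *
 * A worked example with hypothetical numbers: for vm_start = 0x1000,
 * vm_end = 0x9000, one existing region at [0x7000, 0x8000), size =
 * 0x2000 and align = 0x1000, the first candidate is
 * rounddown(0x9000 - 0x2000, 0x1000) = 0x7000.  That overlaps the
 * existing region (0x7000 < its vm_end of 0x8000), so the scan drops
 * below it: rounddown(0x7000 - 0x2000, 0x1000) = 0x5000.  With no
 * further regions in the way, the allocation becomes [0x5000, 0x7000).
 */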
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
	size_t size, gfp_t gfp)
{
	unsigned long start = head->vm_start, addr = head->vm_end;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#zx)\n",
			__func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

	/*
	 * Walk the address-sorted region list from the highest entry
	 * downwards, testing the gap above each region in turn.
	 */
	addr = rounddown(addr - size, align);
	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
		if (addr >= c->vm_end)
			goto found;
		addr = rounddown(c->vm_start - size, align);
		if (addr < start)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry after the one we found.  If the scan fell
	 * off the bottom of the list, &c->vm_list aliases the list head
	 * itself, so the new (lowest) region is added at the front.
	 */
	list_add(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

/*
 * Look up the active region starting at @addr.  The caller must hold
 * head->vm_lock.
 */
static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

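/*
 * Atomically look up a region and retire it: clearing vm_active stops
 * subsequent lookups from returning the region, while it stays on the
 * list so its address range remains reserved until it is handed to
 * arm_vmregion_free().
 */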
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

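/*
 * Unlink a region from the list and free it.  Only the list linkage is
 * protected by vm_lock here; a region that might still be visible to
 * concurrent lookups should be retired via arm_vmregion_find_remove()
 * first.
 */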
void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}