/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 8 bytes on x86, though it's perhaps possible to reduce
 * this to 4 if it's deemed worth the effort. The slob heap is a
 * singly-linked list of pages from __get_free_page, grown on demand,
 * and allocation from the heap is currently first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
 * set, in which case the low-level allocator will fragment blocks to
 * create the proper alignment. Again, objects of page-size or greater
 * are allocated by calling __get_free_pages. As SLAB objects know
 * their size, no separate size bookkeeping is necessary and there is
 * essentially no allocation space overhead.
 */
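/*
 * A worked example of the sizing, assuming a 32-bit build where
 * sizeof(slob_t) == 8: kmalloc(100) calls slob_alloc(100 + SLOB_UNIT),
 * i.e. 108 bytes, which SLOB_UNITS() rounds up to 14 units (112 bytes).
 * The first unit is the slob_t header recording the size in units, and
 * the caller is handed the address just past it. Requests of roughly a
 * page or more (>= PAGE_SIZE - SLOB_UNIT) skip this path and go
 * straight to __get_free_pages() via a bigblock record.
 */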

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>

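/*
 * Free-list node: while a block is free, its first SLOB_UNIT holds the
 * size in units and a pointer to the next free block. For allocated
 * kmalloc blocks the same structure is kept as a header in front of
 * the object so that kfree()/ksize() can recover the size.
 */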
struct slob_block {
	int units;
	struct slob_block *next;
};
typedef struct slob_block slob_t;

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

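/*
 * Bookkeeping for kmalloc() requests that go straight to
 * __get_free_pages(): the page order and start address are kept on a
 * singly-linked list so kfree()/ksize() can find them again.
 */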
struct bigblock {
	int order;
	void *pages;
	struct bigblock *next;
};
typedef struct bigblock bigblock_t;

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

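/*
 * The free list is a circular, address-ordered list anchored at the
 * static sentinel 'arena'; 'slobfree' is a roving pointer to where the
 * last fit or free happened. slob_lock protects the free list,
 * block_lock protects the bigblock list.
 */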
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
static DEFINE_SPINLOCK(slob_lock);
static DEFINE_SPINLOCK(block_lock);

static void slob_free(void *b, int size);
static void slob_timer_cbk(void);


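/*
 * slob_alloc() walks the free list looking for the first block with
 * enough units, optionally splitting off a misaligned head when a
 * non-zero 'align' is requested. On an exact fit the block is simply
 * unlinked; otherwise the tail of the block is left on the free list.
 * If a full pass finds nothing, a fresh page is obtained from
 * __get_free_page(), fed to slob_free() and the search is retried.
 */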
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
	slob_t *prev, *cur, *aligned = 0;
	int delta = 0, units = SLOB_UNITS(size);
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	prev = slobfree;
	for (cur = prev->next; ; prev = cur, cur = cur->next) {
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (cur->units >= units + delta) { /* room enough? */
			if (delta) { /* need to fragment head to align? */
				aligned->units = cur->units - delta;
				aligned->next = cur->next;
				cur->next = aligned;
				cur->units = delta;
				prev = cur;
				cur = aligned;
			}

			if (cur->units == units) /* exact fit? */
				prev->next = cur->next; /* unlink */
			else { /* fragment */
				prev->next = cur + units;
				prev->next->units = cur->units - units;
				prev->next->next = cur->next;
				cur->units = units;
			}

			slobfree = prev;
			spin_unlock_irqrestore(&slob_lock, flags);
			return cur;
		}
		if (cur == slobfree) {
			spin_unlock_irqrestore(&slob_lock, flags);

			if (size == PAGE_SIZE) /* trying to shrink arena? */
				return 0;

			cur = (slob_t *)__get_free_page(gfp);
			if (!cur)
				return 0;

			slob_free(cur, PAGE_SIZE);
			spin_lock_irqsave(&slob_lock, flags);
			cur = slobfree;
		}
	}
}

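/*
 * slob_free() reinserts a block into the address-ordered free list,
 * merging with the following and/or preceding block when they are
 * contiguous. A zero 'size' means the block still carries a valid
 * units count in its slob_t header (the kfree() path).
 */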
static void slob_free(void *block, int size)
{
	slob_t *cur, *b = (slob_t *)block;
	unsigned long flags;

	if (!block)
		return;

	if (size)
		b->units = SLOB_UNITS(size);

	/* Find reinsertion point */
	spin_lock_irqsave(&slob_lock, flags);
	for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
		if (cur >= cur->next && (b > cur || b < cur->next))
			break;

	if (b + b->units == cur->next) {
		b->units += cur->next->units;
		b->next = cur->next->next;
	} else
		b->next = cur->next;

	if (cur + cur->units == b) {
		cur->units += b->units;
		cur->next = b->next;
	} else
		cur->next = b;

	slobfree = cur;

	spin_unlock_irqrestore(&slob_lock, flags);
}

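/*
 * Small requests are satisfied from the slob heap with a one-unit
 * header prepended; anything close to a page or larger goes to
 * __get_free_pages() and is tracked on the bigblock list instead.
 */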
void *__kmalloc(size_t size, gfp_t gfp)
{
	slob_t *m;
	bigblock_t *bb;
	unsigned long flags;

	if (size < PAGE_SIZE - SLOB_UNIT) {
		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
		return m ? (void *)(m + 1) : 0;
	}

	bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
	if (!bb)
		return 0;

	bb->order = get_order(size);
	bb->pages = (void *)__get_free_pages(gfp, bb->order);

	if (bb->pages) {
		spin_lock_irqsave(&block_lock, flags);
		bb->next = bigblocks;
		bigblocks = bb;
		spin_unlock_irqrestore(&block_lock, flags);
		return bb->pages;
	}

	slob_free(bb, sizeof(bigblock_t));
	return 0;
}
EXPORT_SYMBOL(__kmalloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ksize(p)));
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL(krealloc);

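/*
 * kfree() tells small and big blocks apart by alignment: a page-aligned
 * pointer may be a bigblock allocation and is looked up on the bigblock
 * list; everything else is freed through the slob_t header that sits
 * just in front of the object.
 */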
void kfree(const void *block)
{
	bigblock_t *bb, **last = &bigblocks;
	unsigned long flags;

	if (!block)
		return;

	if (!((unsigned long)block & (PAGE_SIZE-1))) {
		/* might be on the big block list */
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
			if (bb->pages == block) {
				*last = bb->next;
				spin_unlock_irqrestore(&block_lock, flags);
				free_pages((unsigned long)block, bb->order);
				slob_free(bb, sizeof(bigblock_t));
				return;
			}
		}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	slob_free((slob_t *)block - 1, 0);
	return;
}

EXPORT_SYMBOL(kfree);

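/*
 * ksize() reports the size actually backing an allocation: page-aligned
 * blocks are looked up on the bigblock list; small blocks report the
 * unit count stored in their slob_t header.
 */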
size_t ksize(const void *block)
{
	bigblock_t *bb;
	unsigned long flags;

	if (!block)
		return 0;

	if (!((unsigned long)block & (PAGE_SIZE-1))) {
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; bb = bb->next)
			if (bb->pages == block) {
				spin_unlock_irqrestore(&block_lock, flags);
				return PAGE_SIZE << bb->order;
			}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	return ((slob_t *)block - 1)->units * SLOB_UNIT;
}

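/*
 * Minimal cache descriptor: SLOB keeps only the object size, the forced
 * alignment, the creation flags, a name and an optional constructor;
 * there are no per-cache slabs or freelists.
 */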
struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

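/*
 * kmem_cache_create() only records the parameters; no memory is
 * preallocated. SLAB_DESTROY_BY_RCU caches get extra room for a
 * struct slob_rcu footer, and alignment is honoured only when forced
 * by SLAB_HWCACHE_ALIGN or an explicit 'align'. A typical caller
 * might do something like (hypothetical cache and struct names):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL, NULL);
 */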
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

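/*
 * Object allocation mirrors kmalloc(): small objects come from the slob
 * heap with the cache's alignment, larger ones straight from the page
 * allocator. The constructor, if any, runs on every allocation.
 */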
void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align);
	else
		b = (void *)__get_free_pages(flags, get_order(c->size));

	if (c->ctor)
		c->ctor(b, c, 0);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

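/*
 * For SLAB_DESTROY_BY_RCU caches the object is not freed immediately:
 * kmem_cache_free() fills in the slob_rcu footer at the end of the
 * object and defers the real free to kmem_rcu_free() via call_rcu(),
 * which recovers the object start from the footer before handing it to
 * __kmem_cache_free().
 */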
static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

static struct timer_list slob_timer = TIMER_INITIALIZER(
	(void (*)(unsigned long))slob_timer_cbk, 0, 0);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

void __init kmem_cache_init(void)
{
	slob_timer_cbk();
}

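/*
 * Periodic trimming: once a second, try to reclaim a whole free page
 * from the slob arena and hand it back to the page allocator.
 * slob_alloc() refuses to grow the heap for PAGE_SIZE requests, so this
 * only succeeds when a page is entirely free and never allocates new
 * memory.
 */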
static void slob_timer_cbk(void)
{
	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);

	if (p)
		free_page((unsigned long)p);

	mod_timer(&slob_timer, jiffies + HZ);
}