/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a linked list of pages from alloc_pages(), and
 * within each page, there is a singly-linked list of free blocks (slob_t).
 * The heap is grown on demand and allocation from the heap is currently
 * first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be served by pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates the size of the block
 * if positive, or the offset (in SLOB_UNITs) of the next free block if
 * negative.
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
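/*
 * For example (offsets counted in SLOB_UNITs from the start of the page):
 * a free block spanning 3 units whose next free block begins at unit
 * offset 20 is encoded as s[0].units = 3, s[1].units = 20; a 1-unit free
 * block with the same successor is just s[0].units = -20.
 */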
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
	list_add(&sp->list, &free_slob_pages);
	__set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__clear_bit(PG_private, &sp->flags);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
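/*
 * SLOB_UNITS() rounds a byte count up to whole slob_t units; e.g. with a
 * 2-byte slob_t (s16 slobidx_t), SLOB_UNITS(10) is 5.
 */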

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

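/*
 * Grab a fresh page (or higher-order block) from the page allocator,
 * honouring an explicit NUMA node when one is given.
 */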
static void *slob_new_page(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = 0;
	int delta = 0, units = SLOB_UNITS(size);

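	/* First-fit scan of this page's free list. */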
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	slob_t *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, &free_slob_pages, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif

		if (sp->units >= SLOB_UNITS(size)) {
			b = slob_page_alloc(sp, size, align);
			if (b)
				break;
		}
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_page(gfp, 0, node);
		if (!b)
			return 0;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
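		/* Describe the whole page as one free block, then retry. */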
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	return b;
}

/*
 * slob_free: return a block to the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (!block)
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			 (void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

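		/* Coalesce with the following free block if it is adjacent. */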
		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

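		/* Coalesce with the preceding free block if it is adjacent. */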
		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

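/*
 * Small kmalloc() requests are carved out of the slob heap with an extra
 * 'align' bytes prepended; the request size is stored in that header so
 * kfree() can recover it. Larger requests go straight to the page
 * allocator as compound pages, with the exact size kept in page->private
 * for ksize().
 */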
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

	if (size < PAGE_SIZE - align) {
		unsigned int *m;
		m = slob_alloc(size + align, gfp, align, node);
		if (!m)
			return NULL;
		*m = size;
		return (void *)m + align;
	} else {
		void *ret;

		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}
		return ret;
	}
}
EXPORT_SYMBOL(__kmalloc_node);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ksize(p)));
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL(krealloc);

void kfree(const void *block)
{
	struct slob_page *sp;

	if (!block)
		return;

	sp = (struct slob_page *)virt_to_page(block);
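	/*
	 * kmalloc()ed blocks on a slob page carry their size in a header
	 * just below the returned pointer; otherwise the block is a
	 * compound page allocated directly from the page allocator.
	 */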
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	if (!block)
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp))
		return ((slob_t *)block - 1)->units + SLOB_UNIT;
	else
		return sp->page.private;
}

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

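/*
 * Objects smaller than a page come from the slob heap; page-sized and
 * larger objects go straight to the page allocator, mirroring kmalloc.
 */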
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align, node);
	else
		b = slob_new_page(flags, get_order(c->size), node);

	if (c->ctor)
		c->ctor(b, c, 0);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

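/*
 * For SLAB_DESTROY_BY_RCU caches the real free is deferred until after an
 * RCU grace period; the slob_rcu footer at the end of the object carries
 * the size needed at that point.
 */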
void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}