/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes; however, most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists: pages for objects smaller
 * than 256 bytes, pages for objects smaller than 1024 bytes, and pages
 * for all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header holding the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors for
 * every SLAB allocation. Objects are returned with 4-byte alignment
 * unless the SLAB_HWCACHE_ALIGN flag is set, in which case the low-level
 * allocator will fragment blocks to create the proper alignment. Again,
 * objects of page-size or greater are allocated by calling alloc_pages().
 * As SLAB objects know their size, no separate size bookkeeping is
 * necessary and there is essentially no allocation space overhead, and
 * compound pages aren't needed for multi-page allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelists, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * a freelist are only made on pages residing on the requested node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

/*
 * slob_block has a field 'units', which indicates the size of the block if
 * positive, or the offset of the next block if negative (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

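/*
 * Illustrative example with hypothetical numbers: suppose a free block b
 * sits 10 units into its page, is 3 units long, and the next free block
 * starts 40 units into the same page.  set_slob() (defined below) encodes
 * this as:
 *
 *	b[0].units = 3;		size, kept in the first unit
 *	b[1].units = 40;	offset of the next free block from the page base
 *
 * A one-unit free block has no room for both fields, so only the negated
 * offset is stored, b[0].units = -40, and slob_units() reports its size
 * as 1.  slob_next() reverses either encoding to walk the free list.
 */
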
/*
 * We use struct page fields to manage some slob allocation aspects,
 * however, to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * is_slob_page: true for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
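
/*
 * Illustrative arithmetic, assuming 4K pages so that slobidx_t is s16 and
 * SLOB_UNIT == 2:
 *
 *	SLOB_UNITS(1)   == 1	even a 1-byte request occupies a whole unit
 *	SLOB_UNITS(100) == 50	requests round up to whole units
 *	SLOB_UNITS(101) == 51
 */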

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
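
/*
 * Illustrative layout, assuming a hypothetical SLAB_DESTROY_BY_RCU cache
 * with a 56-byte object size: kmem_cache_create() grows c->size to
 * 56 + sizeof(struct slob_rcu), kmem_cache_free() writes the footer at
 * b + (c->size - sizeof(struct slob_rcu)), and kmem_rcu_free() later
 * recovers the object start by subtracting that same offset from the
 * rcu_head address before handing the whole block to __kmem_cache_free().
 */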

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

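/*
 * Illustrative walk-through of the small-object kmalloc path below,
 * assuming align works out to 4 on this architecture:
 *
 *	kmalloc(100, ...)
 *	  -> slob_alloc(100 + 4, ...)	104-byte block from a slob page
 *	  *m = 100			size header in the first 4 bytes
 *	  return (void *)m + 4		caller sees 100 usable bytes
 *
 * kfree() and ksize() step back by the same alignment to recover the
 * header.  Requests of PAGE_SIZE - align and larger skip this path and go
 * straight to slob_new_pages(), with the size kept in page->private.
 */
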
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);
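
/*
 * Illustrative usage of this frontend from other kernel code ("foo" and
 * struct foo are hypothetical):
 *
 *	struct kmem_cache *foo_cache =
 *		kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
 *	struct foo *obj = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, -1);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 */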

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}