/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done from pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "slab.h"

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
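
/*
 * Illustrative example (values invented for clarity, not taken from the
 * original source): a free block of 5 units whose next free block starts
 * 40 units from the beginning of the page stores s[0].units == 5 and
 * s[1].units == 40, whereas a 1-unit free block in the same position
 * would store only s[0].units == -40.
 */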
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
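
/*
 * For orientation (figures assume a typical 4K-page configuration, where
 * slobidx_t is an s16): SLOB_UNIT is then 2 bytes, so SLOB_UNITS(100)
 * rounds a 100-byte request up to 50 units.
 */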

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

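/*
 * Grab 2^order pages from the page allocator, honouring an explicit NUMA
 * node when one is given, and return their kernel virtual address (or
 * NULL on failure).
 */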
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		reset_page_mapcount(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

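/*
 * kmalloc frontend: a request that fits within a page (after the size
 * header is prepended) is carved out of the slob heap; anything larger
 * goes straight to the page allocator, as a compound page when order > 0.
 */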
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

#ifdef CONFIG_TRACING
void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif
#endif

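/*
 * As noted in the header comment, kfree() tells slob-managed blocks apart
 * from page-allocator-backed ones by testing PageSlab() on the containing
 * page.
 */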
void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);

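/*
 * Set up a cache: reserve room for the RCU footer on SLAB_DESTROY_BY_RCU
 * caches, record the flags and work out the effective alignment.
 */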
int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	size_t align = c->size;

	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	/* ignore alignment unless it's forced */
	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
	if (c->align < ARCH_SLAB_MINALIGN)
		c->align = ARCH_SLAB_MINALIGN;
	if (c->align < align)
		c->align = align;

	return 0;
}

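/*
 * Allocate one object from cache @c: sub-page objects come from the slob
 * heap, larger ones straight from the page allocator; the cache's
 * constructor (if any) is then run on the result.
 */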
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

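/* Hand an object's memory back to whichever allocator it came from. */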
static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

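/*
 * Free a cache object, deferring the actual release via call_rcu() for
 * SLAB_DESTROY_BY_RCU caches.
 */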
void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}