/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes; however, most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, holding objects less
 * than 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked. These objects are detected
 * in kfree() because PageSlab() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelists, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be satisfied by pages residing on the same
 * node, in order to prevent random node placement.
 */
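/*
 * Illustrative sketch (added for exposition, not in the original file):
 * a slob page whose free blocks are kept in address order.  The page's
 * freelist field points at the first free block, and each free block
 * records its size and the unit offset of the next free block; a next
 * offset of 0 (the page base) marks the last free block.
 *
 *	+---------+------+---------+------+----------+
 *	| free 3u | used | free 1u | used | free 10u |
 *	+---------+------+---------+------+----------+
 *	  freelist ------> next ----------> next -> 0
 */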

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
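/*
 * Worked example (illustrative): a free block of 4 units whose successor
 * lives at unit offset 20 within the page is stored as s[0].units = 4,
 * s[1].units = 20; a 1-unit free block with the same successor fits in a
 * single unit as s[0].units = -20.
 */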
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->lru, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->lru);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
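/*
 * For example, with a 2-byte slob_t (the s16 case above), SLOB_UNIT is 2
 * and SLOB_UNITS(5) = DIV_ROUND_UP(5, 2) = 3, so a 5-byte request
 * consumes three units.
 */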

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
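/*
 * Walkthrough (illustrative): a request for 2 units at 16-byte alignment
 * from a free block starting 1 unit before an aligned address first
 * splits off the 1-unit head ("delta" below), then carves 2 units from
 * the aligned remainder, unlinking it on an exact fit or splitting off
 * the tail as a smaller free block otherwise.
 */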
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
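		/* (list_move_tail() rotates the circular list so that the
		 * next search starts where this one left off.) */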
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
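	/*
	 * Address order lets a freed block coalesce with its successor
	 * (b + units == next) and/or its predecessor
	 * (prev + slob_units(prev) == b), as handled below.
	 */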
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
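/*
 * Resulting small-object layout (illustrative): the size is stored in an
 * unsigned int at the start of an 'align'-sized header and the caller
 * receives the address just past that header; kfree() and ksize() step
 * back by the same 'align' to recover the stored size.
 */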

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);

int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}
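/*
 * Layout note (illustrative): for a SLAB_TYPESAFE_BY_RCU cache, the
 * sizeof(struct slob_rcu) bytes added above become a footer at the end
 * of each object; kmem_cache_free() fills it in and kmem_rcu_free()
 * walks back from it to the start of the block.
 */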

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}