Christoph Lameter81819f02007-05-06 14:49:36 -07001/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
Christoph Lameter881db7f2011-06-01 12:25:53 -05005 * The allocator synchronizes using per slab locks or atomic operations
6 * and only uses a centralized lock to manage a pool of partial slabs.
Christoph Lameter81819f02007-05-06 14:49:36 -07007 *
Christoph Lametercde53532008-07-04 09:59:22 -07008 * (C) 2007 SGI, Christoph Lameter
Christoph Lameter881db7f2011-06-01 12:25:53 -05009 * (C) 2011 Linux Foundation, Christoph Lameter
Christoph Lameter81819f02007-05-06 14:49:36 -070010 */
11
12#include <linux/mm.h>
Nick Piggin1eb5ac62009-05-05 19:13:44 +100013#include <linux/swap.h> /* struct reclaim_state */
Christoph Lameter81819f02007-05-06 14:49:36 -070014#include <linux/module.h>
15#include <linux/bit_spinlock.h>
16#include <linux/interrupt.h>
17#include <linux/bitops.h>
18#include <linux/slab.h>
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +040019#include <linux/proc_fs.h>
Christoph Lameter81819f02007-05-06 14:49:36 -070020#include <linux/seq_file.h>
Vegard Nossum5a896d92008-04-04 00:54:48 +020021#include <linux/kmemcheck.h>
Christoph Lameter81819f02007-05-06 14:49:36 -070022#include <linux/cpu.h>
23#include <linux/cpuset.h>
24#include <linux/mempolicy.h>
25#include <linux/ctype.h>
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -070026#include <linux/debugobjects.h>
Christoph Lameter81819f02007-05-06 14:49:36 -070027#include <linux/kallsyms.h>
Yasunori Gotob9049e22007-10-21 16:41:37 -070028#include <linux/memory.h>
Roman Zippelf8bd2252008-05-01 04:34:31 -070029#include <linux/math64.h>
Akinobu Mita773ff602008-12-23 19:37:01 +090030#include <linux/fault-inject.h>
Christoph Lameter81819f02007-05-06 14:49:36 -070031
Richard Kennedy4a923792010-10-21 10:29:19 +010032#include <trace/events/kmem.h>
33
Christoph Lameter81819f02007-05-06 14:49:36 -070034/*
35 * Lock order:
Christoph Lameter881db7f2011-06-01 12:25:53 -050036 * 1. slub_lock (Global Semaphore)
37 * 2. node->list_lock
38 * 3. slab_lock(page) (Only on some arches and for debugging)
Christoph Lameter81819f02007-05-06 14:49:36 -070039 *
Christoph Lameter881db7f2011-06-01 12:25:53 -050040 * slub_lock
41 *
42 * The role of the slub_lock is to protect the list of all the slabs
43 * and to synchronize major metadata changes to slab cache structures.
44 *
45 * The slab_lock is only used for debugging and on arches that do not
46 * have the ability to do a cmpxchg_double. It only protects the second
47 * double word in the page struct. Meaning
 48 * A. page->freelist -> List of free objects in a page
49 * B. page->counters -> Counters of objects
50 * C. page->frozen -> frozen state
51 *
52 * If a slab is frozen then it is exempt from list management. It is not
53 * on any list. The processor that froze the slab is the one who can
54 * perform list operations on the page. Other processors may put objects
55 * onto the freelist but the processor that froze the slab is the only
56 * one that can retrieve the objects from the page's freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -070057 *
58 * The list_lock protects the partial and full list on each node and
 59 * the partial slab counter. If taken then no slabs may be added to or
 60 * removed from the lists nor may the number of partial slabs be modified.
61 * (Note that the total number of slabs is an atomic value that may be
62 * modified without taking the list lock).
63 *
64 * The list_lock is a centralized lock and thus we avoid taking it as
65 * much as possible. As long as SLUB does not have to handle partial
66 * slabs, operations can continue without any centralized lock. F.e.
67 * allocating a long series of objects that fill up slabs does not require
68 * the list lock.
Christoph Lameter81819f02007-05-06 14:49:36 -070069 * Interrupts are disabled during allocation and deallocation in order to
70 * make the slab allocator safe to use in the context of an irq. In addition
71 * interrupts are disabled to ensure that the processor does not change
72 * while handling per_cpu slabs, due to kernel preemption.
73 *
74 * SLUB assigns one slab for allocation to each processor.
75 * Allocations only occur from these slabs called cpu slabs.
76 *
Christoph Lameter672bba32007-05-09 02:32:39 -070077 * Slabs with free elements are kept on a partial list and during regular
78 * operations no list for full slabs is used. If an object in a full slab is
Christoph Lameter81819f02007-05-06 14:49:36 -070079 * freed then the slab will show up again on the partial lists.
Christoph Lameter672bba32007-05-09 02:32:39 -070080 * We track full slabs for debugging purposes though because otherwise we
81 * cannot scan all objects.
Christoph Lameter81819f02007-05-06 14:49:36 -070082 *
83 * Slabs are freed when they become empty. Teardown and setup is
84 * minimal so we rely on the page allocators per cpu caches for
85 * fast frees and allocs.
86 *
87 * Overloading of page flags that are otherwise used for LRU management.
88 *
Christoph Lameter4b6f0752007-05-16 22:10:53 -070089 * PageActive The slab is frozen and exempt from list processing.
90 * This means that the slab is dedicated to a purpose
91 * such as satisfying allocations for a specific
92 * processor. Objects may be freed in the slab while
93 * it is frozen but slab_free will then skip the usual
94 * list operations. It is up to the processor holding
95 * the slab to integrate the slab into the slab lists
96 * when the slab is no longer needed.
97 *
98 * One use of this flag is to mark slabs that are
99 * used for allocations. Then such a slab becomes a cpu
100 * slab. The cpu slab may be equipped with an additional
Christoph Lameterdfb4f092007-10-16 01:26:05 -0700101 * freelist that allows lockless access to
Christoph Lameter894b8782007-05-10 03:15:16 -0700102 * free objects in addition to the regular freelist
103 * that requires the slab lock.
Christoph Lameter81819f02007-05-06 14:49:36 -0700104 *
105 * PageError Slab requires special handling due to debug
106 * options set. This moves slab handling out of
Christoph Lameter894b8782007-05-10 03:15:16 -0700107 * the fast path and disables lockless freelists.
Christoph Lameter81819f02007-05-06 14:49:36 -0700108 */
109
Christoph Lameteraf537b02010-07-09 14:07:14 -0500110#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
111 SLAB_TRACE | SLAB_DEBUG_FREE)
112
113static inline int kmem_cache_debug(struct kmem_cache *s)
114{
Christoph Lameter5577bd82007-05-16 22:10:56 -0700115#ifdef CONFIG_SLUB_DEBUG
Christoph Lameteraf537b02010-07-09 14:07:14 -0500116 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
Christoph Lameter5577bd82007-05-16 22:10:56 -0700117#else
Christoph Lameteraf537b02010-07-09 14:07:14 -0500118 return 0;
Christoph Lameter5577bd82007-05-16 22:10:56 -0700119#endif
Christoph Lameteraf537b02010-07-09 14:07:14 -0500120}
Christoph Lameter5577bd82007-05-16 22:10:56 -0700121
Christoph Lameter81819f02007-05-06 14:49:36 -0700122/*
123 * Issues still to be resolved:
124 *
Christoph Lameter81819f02007-05-06 14:49:36 -0700125 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
126 *
Christoph Lameter81819f02007-05-06 14:49:36 -0700127 * - Variable sizing of the per node arrays
128 */
129
130/* Enable to test recovery from slab corruption on boot */
131#undef SLUB_RESILIENCY_TEST
132
Christoph Lameterb789ef52011-06-01 12:25:49 -0500133/* Enable to log cmpxchg failures */
134#undef SLUB_DEBUG_CMPXCHG
135
Christoph Lameter81819f02007-05-06 14:49:36 -0700136/*
Christoph Lameter2086d262007-05-06 14:49:46 -0700137 * Minimum number of partial slabs. These will be left on the partial
138 * lists even if they are empty. kmem_cache_shrink may reclaim them.
139 */
Christoph Lameter76be8952007-12-21 14:37:37 -0800140#define MIN_PARTIAL 5
Christoph Lametere95eed52007-05-06 14:49:44 -0700141
Christoph Lameter2086d262007-05-06 14:49:46 -0700142/*
143 * Maximum number of desirable partial slabs.
144 * The existence of more partial slabs makes kmem_cache_shrink
 145 * sort the partial list by the number of objects in them.
146 */
147#define MAX_PARTIAL 10
148
Christoph Lameter81819f02007-05-06 14:49:36 -0700149#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
150 SLAB_POISON | SLAB_STORE_USER)
Christoph Lameter672bba32007-05-09 02:32:39 -0700151
Christoph Lameter81819f02007-05-06 14:49:36 -0700152/*
David Rientjes3de47212009-07-27 18:30:35 -0700153 * Debugging flags that require metadata to be stored in the slab. These get
154 * disabled when slub_debug=O is used and a cache's min order increases with
155 * metadata.
David Rientjesfa5ec8a2009-07-07 00:14:14 -0700156 */
David Rientjes3de47212009-07-27 18:30:35 -0700157#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
David Rientjesfa5ec8a2009-07-07 00:14:14 -0700158
159/*
Christoph Lameter81819f02007-05-06 14:49:36 -0700160 * Set of flags that will prevent slab merging
161 */
162#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +0300163 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
164 SLAB_FAILSLAB)
Christoph Lameter81819f02007-05-06 14:49:36 -0700165
166#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
Vegard Nossum5a896d92008-04-04 00:54:48 +0200167 SLAB_CACHE_DMA | SLAB_NOTRACK)
Christoph Lameter81819f02007-05-06 14:49:36 -0700168
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +0400169#define OO_SHIFT 16
170#define OO_MASK ((1 << OO_SHIFT) - 1)
Christoph Lameter50d5c412011-06-01 12:25:45 -0500171#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is a 15-bit field */
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +0400172
Christoph Lameter81819f02007-05-06 14:49:36 -0700173/* Internal SLUB flags */
Christoph Lameterf90ec392010-07-09 14:07:11 -0500174#define __OBJECT_POISON 0x80000000UL /* Poison object */
Christoph Lameterb789ef52011-06-01 12:25:49 -0500175#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
Christoph Lameter81819f02007-05-06 14:49:36 -0700176
177static int kmem_size = sizeof(struct kmem_cache);
178
179#ifdef CONFIG_SMP
180static struct notifier_block slab_notifier;
181#endif
182
183static enum {
184 DOWN, /* No slab functionality available */
Christoph Lameter51df1142010-08-20 12:37:15 -0500185 PARTIAL, /* Kmem_cache_node works */
Christoph Lameter672bba32007-05-09 02:32:39 -0700186 UP, /* Everything works but does not show up in sysfs */
Christoph Lameter81819f02007-05-06 14:49:36 -0700187 SYSFS /* Sysfs up */
188} slab_state = DOWN;
189
190/* A list of all slab caches on the system */
191static DECLARE_RWSEM(slub_lock);
Adrian Bunk5af328a2007-07-17 04:03:27 -0700192static LIST_HEAD(slab_caches);
Christoph Lameter81819f02007-05-06 14:49:36 -0700193
Christoph Lameter02cbc872007-05-09 02:32:43 -0700194/*
195 * Tracking user of a slab.
196 */
197struct track {
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +0300198 unsigned long addr; /* Called from address */
Christoph Lameter02cbc872007-05-09 02:32:43 -0700199 int cpu; /* Was running on cpu */
200 int pid; /* Pid context */
201 unsigned long when; /* When did the operation occur */
202};
203
204enum track_item { TRACK_ALLOC, TRACK_FREE };
205
Christoph Lameterab4d5ed2010-10-05 13:57:26 -0500206#ifdef CONFIG_SYSFS
Christoph Lameter81819f02007-05-06 14:49:36 -0700207static int sysfs_slab_add(struct kmem_cache *);
208static int sysfs_slab_alias(struct kmem_cache *, const char *);
209static void sysfs_slab_remove(struct kmem_cache *);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -0800210
Christoph Lameter81819f02007-05-06 14:49:36 -0700211#else
Christoph Lameter0c710012007-07-17 04:03:24 -0700212static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
213static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
214 { return 0; }
Christoph Lameter151c6022008-01-07 22:29:05 -0800215static inline void sysfs_slab_remove(struct kmem_cache *s)
216{
Pekka Enberg84c1cf62010-09-14 23:21:12 +0300217 kfree(s->name);
Christoph Lameter151c6022008-01-07 22:29:05 -0800218 kfree(s);
219}
Christoph Lameter8ff12cf2008-02-07 17:47:41 -0800220
Christoph Lameter81819f02007-05-06 14:49:36 -0700221#endif
222
Christoph Lameter4fdccdf2011-03-22 13:35:00 -0500223static inline void stat(const struct kmem_cache *s, enum stat_item si)
Christoph Lameter8ff12cf2008-02-07 17:47:41 -0800224{
225#ifdef CONFIG_SLUB_STATS
Christoph Lameter84e554e62009-12-18 16:26:23 -0600226 __this_cpu_inc(s->cpu_slab->stat[si]);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -0800227#endif
228}
229
Christoph Lameter81819f02007-05-06 14:49:36 -0700230/********************************************************************
231 * Core slab cache functions
232 *******************************************************************/
233
234int slab_is_available(void)
235{
236 return slab_state >= UP;
237}
238
239static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
240{
Christoph Lameter81819f02007-05-06 14:49:36 -0700241 return s->node[node];
Christoph Lameter81819f02007-05-06 14:49:36 -0700242}
243
Christoph Lameter6446faa2008-02-15 23:45:26 -0800244/* Verify that a pointer has an address that is valid within a slab page */
Christoph Lameter02cbc872007-05-09 02:32:43 -0700245static inline int check_valid_pointer(struct kmem_cache *s,
246 struct page *page, const void *object)
247{
248 void *base;
249
Christoph Lametera973e9d2008-03-01 13:40:44 -0800250 if (!object)
Christoph Lameter02cbc872007-05-09 02:32:43 -0700251 return 1;
252
Christoph Lametera973e9d2008-03-01 13:40:44 -0800253 base = page_address(page);
Christoph Lameter39b26462008-04-14 19:11:30 +0300254 if (object < base || object >= base + page->objects * s->size ||
Christoph Lameter02cbc872007-05-09 02:32:43 -0700255 (object - base) % s->size) {
256 return 0;
257 }
258
259 return 1;
260}
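/*
 * Worked example (illustrative values only): with s->size == 64 and
 * base == page_address(page), an object pointer of base + 128 passes the
 * check, while base + 100 is rejected because (100 % 64) != 0, and any
 * pointer at or beyond base + page->objects * 64 is rejected as out of range.
 */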
261
Christoph Lameter7656c722007-05-09 02:32:40 -0700262static inline void *get_freepointer(struct kmem_cache *s, void *object)
263{
264 return *(void **)(object + s->offset);
265}
266
Christoph Lameter1393d9a2011-05-16 15:26:08 -0500267static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
268{
269 void *p;
270
271#ifdef CONFIG_DEBUG_PAGEALLOC
272 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
273#else
274 p = get_freepointer(s, object);
275#endif
276 return p;
277}
278
Christoph Lameter7656c722007-05-09 02:32:40 -0700279static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
280{
281 *(void **)(object + s->offset) = fp;
282}
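/*
 * The free objects of a slab are chained through the objects themselves:
 * each free object holds the address of the next free one at offset
 * s->offset (0 when the pointer may overlay the object data), e.g.
 * (illustrative) page->freelist -> objA -> objB -> NULL.
 * get_freepointer() reads that word and set_freepointer() rewrites it.
 */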
283
284/* Loop over all objects in a slab */
Christoph Lameter224a88b2008-04-14 19:11:31 +0300285#define for_each_object(__p, __s, __addr, __objects) \
286 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
Christoph Lameter7656c722007-05-09 02:32:40 -0700287 __p += (__s)->size)
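/*
 * Typical use of for_each_object() (sketch only, inspect_object() is a
 * hypothetical callback):
 *
 *	void *addr = page_address(page);
 *	void *p;
 *
 *	for_each_object(p, s, addr, page->objects)
 *		inspect_object(s, page, p);
 */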
288
Christoph Lameter7656c722007-05-09 02:32:40 -0700289/* Determine object index from a given position */
290static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
291{
292 return (p - addr) / s->size;
293}
294
Mariusz Kozlowskid71f6062011-02-26 20:10:26 +0100295static inline size_t slab_ksize(const struct kmem_cache *s)
296{
297#ifdef CONFIG_SLUB_DEBUG
298 /*
299 * Debugging requires use of the padding between object
300 * and whatever may come after it.
301 */
302 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
303 return s->objsize;
304
305#endif
306 /*
307 * If we have the need to store the freelist pointer
308 * back there or track user information then we can
309 * only use the space before that information.
310 */
311 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
312 return s->inuse;
313 /*
314 * Else we can use all the padding etc for the allocation
315 */
316 return s->size;
317}
318
Lai Jiangshanab9a0f12011-03-10 15:21:48 +0800319static inline int order_objects(int order, unsigned long size, int reserved)
320{
321 return ((PAGE_SIZE << order) - reserved) / size;
322}
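/*
 * Example (assuming PAGE_SIZE == 4096): order_objects(1, 256, 0) ==
 * ((4096 << 1) - 0) / 256 == 32 objects in a two-page slab.
 */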
323
Christoph Lameter834f3d12008-04-14 19:11:31 +0300324static inline struct kmem_cache_order_objects oo_make(int order,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +0800325 unsigned long size, int reserved)
Christoph Lameter834f3d12008-04-14 19:11:31 +0300326{
327 struct kmem_cache_order_objects x = {
Lai Jiangshanab9a0f12011-03-10 15:21:48 +0800328 (order << OO_SHIFT) + order_objects(order, size, reserved)
Christoph Lameter834f3d12008-04-14 19:11:31 +0300329 };
330
331 return x;
332}
333
334static inline int oo_order(struct kmem_cache_order_objects x)
335{
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +0400336 return x.x >> OO_SHIFT;
Christoph Lameter834f3d12008-04-14 19:11:31 +0300337}
338
339static inline int oo_objects(struct kmem_cache_order_objects x)
340{
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +0400341 return x.x & OO_MASK;
Christoph Lameter834f3d12008-04-14 19:11:31 +0300342}
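/*
 * The order and the object count share one word: the order sits in the
 * bits above OO_SHIFT, the object count in the low OO_MASK bits.
 * Illustration: order 3 with 128 objects is stored as
 * (3 << 16) + 128 == 0x30080, which oo_order() and oo_objects()
 * decompose again.
 */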
343
Christoph Lameter881db7f2011-06-01 12:25:53 -0500344/*
345 * Per slab locking using the pagelock
346 */
347static __always_inline void slab_lock(struct page *page)
348{
349 bit_spin_lock(PG_locked, &page->flags);
350}
351
352static __always_inline void slab_unlock(struct page *page)
353{
354 __bit_spin_unlock(PG_locked, &page->flags);
355}
356
Christoph Lameterb789ef52011-06-01 12:25:49 -0500357static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
358 void *freelist_old, unsigned long counters_old,
359 void *freelist_new, unsigned long counters_new,
360 const char *n)
361{
362#ifdef CONFIG_CMPXCHG_DOUBLE
363 if (s->flags & __CMPXCHG_DOUBLE) {
364 if (cmpxchg_double(&page->freelist,
365 freelist_old, counters_old,
366 freelist_new, counters_new))
367 return 1;
368 } else
369#endif
370 {
Christoph Lameter881db7f2011-06-01 12:25:53 -0500371 slab_lock(page);
Christoph Lameterb789ef52011-06-01 12:25:49 -0500372 if (page->freelist == freelist_old && page->counters == counters_old) {
373 page->freelist = freelist_new;
374 page->counters = counters_new;
Christoph Lameter881db7f2011-06-01 12:25:53 -0500375 slab_unlock(page);
Christoph Lameterb789ef52011-06-01 12:25:49 -0500376 return 1;
377 }
Christoph Lameter881db7f2011-06-01 12:25:53 -0500378 slab_unlock(page);
Christoph Lameterb789ef52011-06-01 12:25:49 -0500379 }
380
381 cpu_relax();
382 stat(s, CMPXCHG_DOUBLE_FAIL);
383
384#ifdef SLUB_DEBUG_CMPXCHG
385 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
386#endif
387
388 return 0;
389}
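/*
 * Callers are expected to retry on failure; a minimal sketch (not an
 * actual call site in this file):
 *
 *	do {
 *		old_freelist = page->freelist;
 *		old_counters = page->counters;
 *		... compute new_freelist and new_counters ...
 *	} while (!cmpxchg_double_slab(s, page, old_freelist, old_counters,
 *				      new_freelist, new_counters, "example"));
 */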
390
Christoph Lameter41ecc552007-05-09 02:32:44 -0700391#ifdef CONFIG_SLUB_DEBUG
392/*
Christoph Lameter5f80b132011-04-15 14:48:13 -0500393 * Determine a map of objects in use on a page.
 394 *
Christoph Lameter881db7f2011-06-01 12:25:53 -0500395 * The node's list_lock must be held to guarantee that the page does
Christoph Lameter5f80b132011-04-15 14:48:13 -0500396 * not vanish from under us.
397 */
398static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
399{
400 void *p;
401 void *addr = page_address(page);
402
403 for (p = page->freelist; p; p = get_freepointer(s, p))
404 set_bit(slab_index(p, s, addr), map);
405}
406
Christoph Lameter41ecc552007-05-09 02:32:44 -0700407/*
408 * Debug settings:
409 */
Christoph Lameterf0630ff2007-07-15 23:38:14 -0700410#ifdef CONFIG_SLUB_DEBUG_ON
411static int slub_debug = DEBUG_DEFAULT_FLAGS;
412#else
Christoph Lameter41ecc552007-05-09 02:32:44 -0700413static int slub_debug;
Christoph Lameterf0630ff2007-07-15 23:38:14 -0700414#endif
Christoph Lameter41ecc552007-05-09 02:32:44 -0700415
416static char *slub_debug_slabs;
David Rientjesfa5ec8a2009-07-07 00:14:14 -0700417static int disable_higher_order_debug;
Christoph Lameter41ecc552007-05-09 02:32:44 -0700418
Christoph Lameter7656c722007-05-09 02:32:40 -0700419/*
Christoph Lameter81819f02007-05-06 14:49:36 -0700420 * Object debugging
421 */
422static void print_section(char *text, u8 *addr, unsigned int length)
423{
424 int i, offset;
425 int newline = 1;
426 char ascii[17];
427
428 ascii[16] = 0;
429
430 for (i = 0; i < length; i++) {
431 if (newline) {
Christoph Lameter24922682007-07-17 04:03:18 -0700432 printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
Christoph Lameter81819f02007-05-06 14:49:36 -0700433 newline = 0;
434 }
Pekka Enberg06428782008-01-07 23:20:27 -0800435 printk(KERN_CONT " %02x", addr[i]);
Christoph Lameter81819f02007-05-06 14:49:36 -0700436 offset = i % 16;
437 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
438 if (offset == 15) {
Pekka Enberg06428782008-01-07 23:20:27 -0800439 printk(KERN_CONT " %s\n", ascii);
Christoph Lameter81819f02007-05-06 14:49:36 -0700440 newline = 1;
441 }
442 }
443 if (!newline) {
444 i %= 16;
445 while (i < 16) {
Pekka Enberg06428782008-01-07 23:20:27 -0800446 printk(KERN_CONT " ");
Christoph Lameter81819f02007-05-06 14:49:36 -0700447 ascii[i] = ' ';
448 i++;
449 }
Pekka Enberg06428782008-01-07 23:20:27 -0800450 printk(KERN_CONT " %s\n", ascii);
Christoph Lameter81819f02007-05-06 14:49:36 -0700451 }
452}
453
Christoph Lameter81819f02007-05-06 14:49:36 -0700454static struct track *get_track(struct kmem_cache *s, void *object,
455 enum track_item alloc)
456{
457 struct track *p;
458
459 if (s->offset)
460 p = object + s->offset + sizeof(void *);
461 else
462 p = object + s->inuse;
463
464 return p + alloc;
465}
466
467static void set_track(struct kmem_cache *s, void *object,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +0300468 enum track_item alloc, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -0700469{
Akinobu Mita1a00df42009-03-07 00:36:21 +0900470 struct track *p = get_track(s, object, alloc);
Christoph Lameter81819f02007-05-06 14:49:36 -0700471
Christoph Lameter81819f02007-05-06 14:49:36 -0700472 if (addr) {
473 p->addr = addr;
474 p->cpu = smp_processor_id();
Alexey Dobriyan88e4ccf2008-06-23 02:58:37 +0400475 p->pid = current->pid;
Christoph Lameter81819f02007-05-06 14:49:36 -0700476 p->when = jiffies;
477 } else
478 memset(p, 0, sizeof(struct track));
479}
480
Christoph Lameter81819f02007-05-06 14:49:36 -0700481static void init_tracking(struct kmem_cache *s, void *object)
482{
Christoph Lameter24922682007-07-17 04:03:18 -0700483 if (!(s->flags & SLAB_STORE_USER))
484 return;
485
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +0300486 set_track(s, object, TRACK_FREE, 0UL);
487 set_track(s, object, TRACK_ALLOC, 0UL);
Christoph Lameter81819f02007-05-06 14:49:36 -0700488}
489
490static void print_track(const char *s, struct track *t)
491{
492 if (!t->addr)
493 return;
494
Linus Torvalds7daf7052008-07-14 12:12:53 -0700495 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +0300496 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
Christoph Lameter81819f02007-05-06 14:49:36 -0700497}
498
Christoph Lameter24922682007-07-17 04:03:18 -0700499static void print_tracking(struct kmem_cache *s, void *object)
500{
501 if (!(s->flags & SLAB_STORE_USER))
502 return;
503
504 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
505 print_track("Freed", get_track(s, object, TRACK_FREE));
506}
507
508static void print_page_info(struct page *page)
509{
Christoph Lameter39b26462008-04-14 19:11:30 +0300510 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
511 page, page->objects, page->inuse, page->freelist, page->flags);
Christoph Lameter24922682007-07-17 04:03:18 -0700512
513}
514
515static void slab_bug(struct kmem_cache *s, char *fmt, ...)
516{
517 va_list args;
518 char buf[100];
519
520 va_start(args, fmt);
521 vsnprintf(buf, sizeof(buf), fmt, args);
522 va_end(args);
523 printk(KERN_ERR "========================================"
524 "=====================================\n");
525 printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
526 printk(KERN_ERR "----------------------------------------"
527 "-------------------------------------\n\n");
528}
529
530static void slab_fix(struct kmem_cache *s, char *fmt, ...)
531{
532 va_list args;
533 char buf[100];
534
535 va_start(args, fmt);
536 vsnprintf(buf, sizeof(buf), fmt, args);
537 va_end(args);
538 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
539}
540
541static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
Christoph Lameter81819f02007-05-06 14:49:36 -0700542{
543 unsigned int off; /* Offset of last byte */
Christoph Lametera973e9d2008-03-01 13:40:44 -0800544 u8 *addr = page_address(page);
Christoph Lameter24922682007-07-17 04:03:18 -0700545
546 print_tracking(s, p);
547
548 print_page_info(page);
549
550 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
551 p, p - addr, get_freepointer(s, p));
552
553 if (p > addr + 16)
554 print_section("Bytes b4", p - 16, 16);
555
Pekka Enberg0ebd6522008-07-19 14:17:22 +0300556 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
Christoph Lameter81819f02007-05-06 14:49:36 -0700557
558 if (s->flags & SLAB_RED_ZONE)
559 print_section("Redzone", p + s->objsize,
560 s->inuse - s->objsize);
561
Christoph Lameter81819f02007-05-06 14:49:36 -0700562 if (s->offset)
563 off = s->offset + sizeof(void *);
564 else
565 off = s->inuse;
566
Christoph Lameter24922682007-07-17 04:03:18 -0700567 if (s->flags & SLAB_STORE_USER)
Christoph Lameter81819f02007-05-06 14:49:36 -0700568 off += 2 * sizeof(struct track);
Christoph Lameter81819f02007-05-06 14:49:36 -0700569
570 if (off != s->size)
571 /* Beginning of the filler is the free pointer */
Christoph Lameter24922682007-07-17 04:03:18 -0700572 print_section("Padding", p + off, s->size - off);
573
574 dump_stack();
Christoph Lameter81819f02007-05-06 14:49:36 -0700575}
576
577static void object_err(struct kmem_cache *s, struct page *page,
578 u8 *object, char *reason)
579{
Christoph Lameter3dc50632008-04-23 12:28:01 -0700580 slab_bug(s, "%s", reason);
Christoph Lameter24922682007-07-17 04:03:18 -0700581 print_trailer(s, page, object);
Christoph Lameter81819f02007-05-06 14:49:36 -0700582}
583
Christoph Lameter24922682007-07-17 04:03:18 -0700584static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
Christoph Lameter81819f02007-05-06 14:49:36 -0700585{
586 va_list args;
587 char buf[100];
588
Christoph Lameter24922682007-07-17 04:03:18 -0700589 va_start(args, fmt);
590 vsnprintf(buf, sizeof(buf), fmt, args);
Christoph Lameter81819f02007-05-06 14:49:36 -0700591 va_end(args);
Christoph Lameter3dc50632008-04-23 12:28:01 -0700592 slab_bug(s, "%s", buf);
Christoph Lameter24922682007-07-17 04:03:18 -0700593 print_page_info(page);
Christoph Lameter81819f02007-05-06 14:49:36 -0700594 dump_stack();
595}
596
Christoph Lameterf7cb1932010-09-29 07:15:01 -0500597static void init_object(struct kmem_cache *s, void *object, u8 val)
Christoph Lameter81819f02007-05-06 14:49:36 -0700598{
599 u8 *p = object;
600
601 if (s->flags & __OBJECT_POISON) {
602 memset(p, POISON_FREE, s->objsize - 1);
Pekka Enberg06428782008-01-07 23:20:27 -0800603 p[s->objsize - 1] = POISON_END;
Christoph Lameter81819f02007-05-06 14:49:36 -0700604 }
605
606 if (s->flags & SLAB_RED_ZONE)
Christoph Lameterf7cb1932010-09-29 07:15:01 -0500607 memset(p + s->objsize, val, s->inuse - s->objsize);
Christoph Lameter81819f02007-05-06 14:49:36 -0700608}
609
Christoph Lameter24922682007-07-17 04:03:18 -0700610static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
Christoph Lameter81819f02007-05-06 14:49:36 -0700611{
612 while (bytes) {
613 if (*start != (u8)value)
Christoph Lameter24922682007-07-17 04:03:18 -0700614 return start;
Christoph Lameter81819f02007-05-06 14:49:36 -0700615 start++;
616 bytes--;
617 }
Christoph Lameter24922682007-07-17 04:03:18 -0700618 return NULL;
619}
620
621static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
622 void *from, void *to)
623{
624 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
625 memset(from, data, to - from);
626}
627
628static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
629 u8 *object, char *what,
Pekka Enberg06428782008-01-07 23:20:27 -0800630 u8 *start, unsigned int value, unsigned int bytes)
Christoph Lameter24922682007-07-17 04:03:18 -0700631{
632 u8 *fault;
633 u8 *end;
634
635 fault = check_bytes(start, value, bytes);
636 if (!fault)
637 return 1;
638
639 end = start + bytes;
640 while (end > fault && end[-1] == value)
641 end--;
642
643 slab_bug(s, "%s overwritten", what);
644 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
645 fault, end - 1, fault[0], value);
646 print_trailer(s, page, object);
647
648 restore_bytes(s, what, value, fault, end);
649 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -0700650}
651
Christoph Lameter81819f02007-05-06 14:49:36 -0700652/*
653 * Object layout:
654 *
655 * object address
656 * Bytes of the object to be managed.
657 * If the freepointer may overlay the object then the free
658 * pointer is the first word of the object.
Christoph Lameter672bba32007-05-09 02:32:39 -0700659 *
Christoph Lameter81819f02007-05-06 14:49:36 -0700660 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
661 * 0xa5 (POISON_END)
662 *
663 * object + s->objsize
664 * Padding to reach word boundary. This is also used for Redzoning.
Christoph Lameter672bba32007-05-09 02:32:39 -0700665 * Padding is extended by another word if Redzoning is enabled and
666 * objsize == inuse.
667 *
Christoph Lameter81819f02007-05-06 14:49:36 -0700668 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
669 * 0xcc (RED_ACTIVE) for objects in use.
670 *
671 * object + s->inuse
Christoph Lameter672bba32007-05-09 02:32:39 -0700672 * Meta data starts here.
673 *
Christoph Lameter81819f02007-05-06 14:49:36 -0700674 * A. Free pointer (if we cannot overwrite object on free)
675 * B. Tracking data for SLAB_STORE_USER
Christoph Lameter672bba32007-05-09 02:32:39 -0700676 * C. Padding to reach required alignment boundary or at minimum
Christoph Lameter6446faa2008-02-15 23:45:26 -0800677 * one word if debugging is on to be able to detect writes
Christoph Lameter672bba32007-05-09 02:32:39 -0700678 * before the word boundary.
679 *
680 * Padding is done using 0x5a (POISON_INUSE)
Christoph Lameter81819f02007-05-06 14:49:36 -0700681 *
682 * object + s->size
Christoph Lameter672bba32007-05-09 02:32:39 -0700683 * Nothing is used beyond s->size.
Christoph Lameter81819f02007-05-06 14:49:36 -0700684 *
Christoph Lameter672bba32007-05-09 02:32:39 -0700685 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 686 * ignored, and therefore no slab options that rely on these boundaries
Christoph Lameter81819f02007-05-06 14:49:36 -0700687 * may be used with merged slabcaches.
688 */
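/*
 * Illustrative summary of one slot when red zoning, a separate free
 * pointer and user tracking are all active (exact offsets depend on the
 * cache):
 *
 *	[ object | red zone | free pointer | alloc track | free track |
 *	  alignment padding ]   <- s->size bytes in total
 */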
689
Christoph Lameter81819f02007-05-06 14:49:36 -0700690static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
691{
692 unsigned long off = s->inuse; /* The end of info */
693
694 if (s->offset)
695 /* Freepointer is placed after the object. */
696 off += sizeof(void *);
697
698 if (s->flags & SLAB_STORE_USER)
699 /* We also have user information there */
700 off += 2 * sizeof(struct track);
701
702 if (s->size == off)
703 return 1;
704
Christoph Lameter24922682007-07-17 04:03:18 -0700705 return check_bytes_and_report(s, page, p, "Object padding",
706 p + off, POISON_INUSE, s->size - off);
Christoph Lameter81819f02007-05-06 14:49:36 -0700707}
708
Christoph Lameter39b26462008-04-14 19:11:30 +0300709/* Check the pad bytes at the end of a slab page */
Christoph Lameter81819f02007-05-06 14:49:36 -0700710static int slab_pad_check(struct kmem_cache *s, struct page *page)
711{
Christoph Lameter24922682007-07-17 04:03:18 -0700712 u8 *start;
713 u8 *fault;
714 u8 *end;
715 int length;
716 int remainder;
Christoph Lameter81819f02007-05-06 14:49:36 -0700717
718 if (!(s->flags & SLAB_POISON))
719 return 1;
720
Christoph Lametera973e9d2008-03-01 13:40:44 -0800721 start = page_address(page);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +0800722 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
Christoph Lameter39b26462008-04-14 19:11:30 +0300723 end = start + length;
724 remainder = length % s->size;
Christoph Lameter81819f02007-05-06 14:49:36 -0700725 if (!remainder)
726 return 1;
727
Christoph Lameter39b26462008-04-14 19:11:30 +0300728 fault = check_bytes(end - remainder, POISON_INUSE, remainder);
Christoph Lameter24922682007-07-17 04:03:18 -0700729 if (!fault)
730 return 1;
731 while (end > fault && end[-1] == POISON_INUSE)
732 end--;
733
734 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
Christoph Lameter39b26462008-04-14 19:11:30 +0300735 print_section("Padding", end - remainder, remainder);
Christoph Lameter24922682007-07-17 04:03:18 -0700736
Eric Dumazet8a3d2712009-09-03 16:08:06 +0200737 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
Christoph Lameter24922682007-07-17 04:03:18 -0700738 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -0700739}
740
741static int check_object(struct kmem_cache *s, struct page *page,
Christoph Lameterf7cb1932010-09-29 07:15:01 -0500742 void *object, u8 val)
Christoph Lameter81819f02007-05-06 14:49:36 -0700743{
744 u8 *p = object;
745 u8 *endobject = object + s->objsize;
746
747 if (s->flags & SLAB_RED_ZONE) {
Christoph Lameter24922682007-07-17 04:03:18 -0700748 if (!check_bytes_and_report(s, page, object, "Redzone",
Christoph Lameterf7cb1932010-09-29 07:15:01 -0500749 endobject, val, s->inuse - s->objsize))
Christoph Lameter81819f02007-05-06 14:49:36 -0700750 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -0700751 } else {
Ingo Molnar3adbefe2008-02-05 17:57:39 -0800752 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
753 check_bytes_and_report(s, page, p, "Alignment padding",
754 endobject, POISON_INUSE, s->inuse - s->objsize);
755 }
Christoph Lameter81819f02007-05-06 14:49:36 -0700756 }
757
758 if (s->flags & SLAB_POISON) {
Christoph Lameterf7cb1932010-09-29 07:15:01 -0500759 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
Christoph Lameter24922682007-07-17 04:03:18 -0700760 (!check_bytes_and_report(s, page, p, "Poison", p,
761 POISON_FREE, s->objsize - 1) ||
762 !check_bytes_and_report(s, page, p, "Poison",
Pekka Enberg06428782008-01-07 23:20:27 -0800763 p + s->objsize - 1, POISON_END, 1)))
Christoph Lameter81819f02007-05-06 14:49:36 -0700764 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -0700765 /*
766 * check_pad_bytes cleans up on its own.
767 */
768 check_pad_bytes(s, page, p);
769 }
770
Christoph Lameterf7cb1932010-09-29 07:15:01 -0500771 if (!s->offset && val == SLUB_RED_ACTIVE)
Christoph Lameter81819f02007-05-06 14:49:36 -0700772 /*
773 * Object and freepointer overlap. Cannot check
774 * freepointer while object is allocated.
775 */
776 return 1;
777
778 /* Check free pointer validity */
779 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
780 object_err(s, page, p, "Freepointer corrupt");
781 /*
Nick Andrew9f6c708e2008-12-05 14:08:08 +1100782 * No choice but to zap it and thus lose the remainder
Christoph Lameter81819f02007-05-06 14:49:36 -0700783 * of the free objects in this slab. May cause
Christoph Lameter672bba32007-05-09 02:32:39 -0700784 * another error because the object count is now wrong.
Christoph Lameter81819f02007-05-06 14:49:36 -0700785 */
Christoph Lametera973e9d2008-03-01 13:40:44 -0800786 set_freepointer(s, p, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -0700787 return 0;
788 }
789 return 1;
790}
791
792static int check_slab(struct kmem_cache *s, struct page *page)
793{
Christoph Lameter39b26462008-04-14 19:11:30 +0300794 int maxobj;
795
Christoph Lameter81819f02007-05-06 14:49:36 -0700796 VM_BUG_ON(!irqs_disabled());
797
798 if (!PageSlab(page)) {
Christoph Lameter24922682007-07-17 04:03:18 -0700799 slab_err(s, page, "Not a valid slab page");
Christoph Lameter81819f02007-05-06 14:49:36 -0700800 return 0;
801 }
Christoph Lameter39b26462008-04-14 19:11:30 +0300802
Lai Jiangshanab9a0f12011-03-10 15:21:48 +0800803 maxobj = order_objects(compound_order(page), s->size, s->reserved);
Christoph Lameter39b26462008-04-14 19:11:30 +0300804 if (page->objects > maxobj) {
805 slab_err(s, page, "objects %u > max %u",
 806 page->objects, maxobj);
807 return 0;
808 }
809 if (page->inuse > page->objects) {
Christoph Lameter24922682007-07-17 04:03:18 -0700810 slab_err(s, page, "inuse %u > max %u",
Christoph Lameter39b26462008-04-14 19:11:30 +0300811 page->inuse, page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -0700812 return 0;
813 }
814 /* Slab_pad_check fixes things up after itself */
815 slab_pad_check(s, page);
816 return 1;
817}
818
819/*
Christoph Lameter672bba32007-05-09 02:32:39 -0700820 * Determine if a certain object on a page is on the freelist. Must hold the
821 * slab lock to guarantee that the chains are in a consistent state.
Christoph Lameter81819f02007-05-06 14:49:36 -0700822 */
823static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
824{
825 int nr = 0;
Christoph Lameter881db7f2011-06-01 12:25:53 -0500826 void *fp;
Christoph Lameter81819f02007-05-06 14:49:36 -0700827 void *object = NULL;
Christoph Lameter224a88b2008-04-14 19:11:31 +0300828 unsigned long max_objects;
Christoph Lameter81819f02007-05-06 14:49:36 -0700829
Christoph Lameter881db7f2011-06-01 12:25:53 -0500830 fp = page->freelist;
Christoph Lameter39b26462008-04-14 19:11:30 +0300831 while (fp && nr <= page->objects) {
Christoph Lameter81819f02007-05-06 14:49:36 -0700832 if (fp == search)
833 return 1;
834 if (!check_valid_pointer(s, page, fp)) {
835 if (object) {
836 object_err(s, page, object,
837 "Freechain corrupt");
Christoph Lametera973e9d2008-03-01 13:40:44 -0800838 set_freepointer(s, object, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -0700839 break;
840 } else {
Christoph Lameter24922682007-07-17 04:03:18 -0700841 slab_err(s, page, "Freepointer corrupt");
Christoph Lametera973e9d2008-03-01 13:40:44 -0800842 page->freelist = NULL;
Christoph Lameter39b26462008-04-14 19:11:30 +0300843 page->inuse = page->objects;
Christoph Lameter24922682007-07-17 04:03:18 -0700844 slab_fix(s, "Freelist cleared");
Christoph Lameter81819f02007-05-06 14:49:36 -0700845 return 0;
846 }
847 break;
848 }
849 object = fp;
850 fp = get_freepointer(s, object);
851 nr++;
852 }
853
Lai Jiangshanab9a0f12011-03-10 15:21:48 +0800854 max_objects = order_objects(compound_order(page), s->size, s->reserved);
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +0400855 if (max_objects > MAX_OBJS_PER_PAGE)
856 max_objects = MAX_OBJS_PER_PAGE;
Christoph Lameter224a88b2008-04-14 19:11:31 +0300857
858 if (page->objects != max_objects) {
859 slab_err(s, page, "Wrong number of objects. Found %d but "
860 "should be %d", page->objects, max_objects);
861 page->objects = max_objects;
862 slab_fix(s, "Number of objects adjusted.");
863 }
Christoph Lameter39b26462008-04-14 19:11:30 +0300864 if (page->inuse != page->objects - nr) {
Christoph Lameter70d71222007-05-06 14:49:47 -0700865 slab_err(s, page, "Wrong object count. Counter is %d but "
Christoph Lameter39b26462008-04-14 19:11:30 +0300866 "counted were %d", page->inuse, page->objects - nr);
867 page->inuse = page->objects - nr;
Christoph Lameter24922682007-07-17 04:03:18 -0700868 slab_fix(s, "Object count adjusted.");
Christoph Lameter81819f02007-05-06 14:49:36 -0700869 }
870 return search == NULL;
871}
872
Christoph Lameter0121c6192008-04-29 16:11:12 -0700873static void trace(struct kmem_cache *s, struct page *page, void *object,
874 int alloc)
Christoph Lameter3ec09742007-05-16 22:11:00 -0700875{
876 if (s->flags & SLAB_TRACE) {
877 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
878 s->name,
879 alloc ? "alloc" : "free",
880 object, page->inuse,
881 page->freelist);
882
883 if (!alloc)
884 print_section("Object", (void *)object, s->objsize);
885
886 dump_stack();
887 }
888}
889
Christoph Lameter643b1132007-05-06 14:49:42 -0700890/*
Christoph Lameterc016b0b2010-08-20 12:37:16 -0500891 * Hooks for other subsystems that check memory allocations. In a typical
 892 * production configuration these hooks should all produce no code at all.
893 */
894static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
895{
Christoph Lameterc1d50832010-08-20 12:37:17 -0500896 flags &= gfp_allowed_mask;
Christoph Lameterc016b0b2010-08-20 12:37:16 -0500897 lockdep_trace_alloc(flags);
898 might_sleep_if(flags & __GFP_WAIT);
899
900 return should_failslab(s->objsize, flags, s->flags);
901}
902
903static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
904{
Christoph Lameterc1d50832010-08-20 12:37:17 -0500905 flags &= gfp_allowed_mask;
Eric Dumazetb3d41882011-02-14 18:35:22 +0100906 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
Christoph Lameterc016b0b2010-08-20 12:37:16 -0500907 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
908}
909
910static inline void slab_free_hook(struct kmem_cache *s, void *x)
911{
912 kmemleak_free_recursive(x, s->flags);
Christoph Lameterc016b0b2010-08-20 12:37:16 -0500913
Christoph Lameterd3f661d2011-02-25 11:38:52 -0600914 /*
915 * Trouble is that we may no longer disable interupts in the fast path
916 * So in order to make the debug calls that expect irqs to be
917 * disabled we need to disable interrupts temporarily.
918 */
919#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
920 {
921 unsigned long flags;
922
923 local_irq_save(flags);
924 kmemcheck_slab_free(s, x, s->objsize);
925 debug_check_no_locks_freed(x, s->objsize);
Christoph Lameterd3f661d2011-02-25 11:38:52 -0600926 local_irq_restore(flags);
927 }
928#endif
Thomas Gleixnerf9b615d2011-03-24 21:26:46 +0200929 if (!(s->flags & SLAB_DEBUG_OBJECTS))
930 debug_check_no_obj_freed(x, s->objsize);
Christoph Lameterc016b0b2010-08-20 12:37:16 -0500931}
932
933/*
Christoph Lameter672bba32007-05-09 02:32:39 -0700934 * Tracking of fully allocated slabs for debugging purposes.
Christoph Lameter5cc6eee2011-06-01 12:25:50 -0500935 *
936 * list_lock must be held.
Christoph Lameter643b1132007-05-06 14:49:42 -0700937 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -0500938static void add_full(struct kmem_cache *s,
939 struct kmem_cache_node *n, struct page *page)
Christoph Lameter643b1132007-05-06 14:49:42 -0700940{
Christoph Lameter643b1132007-05-06 14:49:42 -0700941 if (!(s->flags & SLAB_STORE_USER))
942 return;
943
Christoph Lameter5cc6eee2011-06-01 12:25:50 -0500944 list_add(&page->lru, &n->full);
945}
Christoph Lameter643b1132007-05-06 14:49:42 -0700946
Christoph Lameter5cc6eee2011-06-01 12:25:50 -0500947/*
948 * list_lock must be held.
949 */
950static void remove_full(struct kmem_cache *s, struct page *page)
951{
952 if (!(s->flags & SLAB_STORE_USER))
953 return;
954
Christoph Lameter643b1132007-05-06 14:49:42 -0700955 list_del(&page->lru);
Christoph Lameter643b1132007-05-06 14:49:42 -0700956}
957
Christoph Lameter0f389ec2008-04-14 18:53:02 +0300958/* Tracking of the number of slabs for debugging purposes */
959static inline unsigned long slabs_node(struct kmem_cache *s, int node)
960{
961 struct kmem_cache_node *n = get_node(s, node);
962
963 return atomic_long_read(&n->nr_slabs);
964}
965
Alexander Beregalov26c02cf2009-06-11 14:08:48 +0400966static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
967{
968 return atomic_long_read(&n->nr_slabs);
969}
970
Christoph Lameter205ab992008-04-14 19:11:40 +0300971static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
Christoph Lameter0f389ec2008-04-14 18:53:02 +0300972{
973 struct kmem_cache_node *n = get_node(s, node);
974
975 /*
976 * May be called early in order to allocate a slab for the
977 * kmem_cache_node structure. Solve the chicken-egg
978 * dilemma by deferring the increment of the count during
979 * bootstrap (see early_kmem_cache_node_alloc).
980 */
Christoph Lameter7340cc82010-09-28 08:10:26 -0500981 if (n) {
Christoph Lameter0f389ec2008-04-14 18:53:02 +0300982 atomic_long_inc(&n->nr_slabs);
Christoph Lameter205ab992008-04-14 19:11:40 +0300983 atomic_long_add(objects, &n->total_objects);
984 }
Christoph Lameter0f389ec2008-04-14 18:53:02 +0300985}
Christoph Lameter205ab992008-04-14 19:11:40 +0300986static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
Christoph Lameter0f389ec2008-04-14 18:53:02 +0300987{
988 struct kmem_cache_node *n = get_node(s, node);
989
990 atomic_long_dec(&n->nr_slabs);
Christoph Lameter205ab992008-04-14 19:11:40 +0300991 atomic_long_sub(objects, &n->total_objects);
Christoph Lameter0f389ec2008-04-14 18:53:02 +0300992}
993
994/* Object debug checks for alloc/free paths */
Christoph Lameter3ec09742007-05-16 22:11:00 -0700995static void setup_object_debug(struct kmem_cache *s, struct page *page,
996 void *object)
997{
998 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
999 return;
1000
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001001 init_object(s, object, SLUB_RED_INACTIVE);
Christoph Lameter3ec09742007-05-16 22:11:00 -07001002 init_tracking(s, object);
1003}
1004
Christoph Lameter15370662010-08-20 12:37:12 -05001005static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001006 void *object, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07001007{
1008 if (!check_slab(s, page))
1009 goto bad;
1010
Christoph Lameter81819f02007-05-06 14:49:36 -07001011 if (!check_valid_pointer(s, page, object)) {
1012 object_err(s, page, object, "Freelist Pointer check fails");
Christoph Lameter70d71222007-05-06 14:49:47 -07001013 goto bad;
Christoph Lameter81819f02007-05-06 14:49:36 -07001014 }
1015
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001016 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
Christoph Lameter81819f02007-05-06 14:49:36 -07001017 goto bad;
Christoph Lameter81819f02007-05-06 14:49:36 -07001018
Christoph Lameter3ec09742007-05-16 22:11:00 -07001019 /* Success. Perform special debug activities for allocs */
1020 if (s->flags & SLAB_STORE_USER)
1021 set_track(s, object, TRACK_ALLOC, addr);
1022 trace(s, page, object, 1);
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001023 init_object(s, object, SLUB_RED_ACTIVE);
Christoph Lameter81819f02007-05-06 14:49:36 -07001024 return 1;
Christoph Lameter3ec09742007-05-16 22:11:00 -07001025
Christoph Lameter81819f02007-05-06 14:49:36 -07001026bad:
1027 if (PageSlab(page)) {
1028 /*
 1029 * If this is a slab page then let's do the best we can
1030 * to avoid issues in the future. Marking all objects
Christoph Lameter672bba32007-05-09 02:32:39 -07001031 * as used avoids touching the remaining objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07001032 */
Christoph Lameter24922682007-07-17 04:03:18 -07001033 slab_fix(s, "Marking all objects used");
Christoph Lameter39b26462008-04-14 19:11:30 +03001034 page->inuse = page->objects;
Christoph Lametera973e9d2008-03-01 13:40:44 -08001035 page->freelist = NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001036 }
1037 return 0;
1038}
1039
Christoph Lameter15370662010-08-20 12:37:12 -05001040static noinline int free_debug_processing(struct kmem_cache *s,
1041 struct page *page, void *object, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07001042{
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001043 unsigned long flags;
1044 int rc = 0;
1045
1046 local_irq_save(flags);
Christoph Lameter881db7f2011-06-01 12:25:53 -05001047 slab_lock(page);
1048
Christoph Lameter81819f02007-05-06 14:49:36 -07001049 if (!check_slab(s, page))
1050 goto fail;
1051
1052 if (!check_valid_pointer(s, page, object)) {
Christoph Lameter70d71222007-05-06 14:49:47 -07001053 slab_err(s, page, "Invalid object pointer 0x%p", object);
Christoph Lameter81819f02007-05-06 14:49:36 -07001054 goto fail;
1055 }
1056
1057 if (on_freelist(s, page, object)) {
Christoph Lameter24922682007-07-17 04:03:18 -07001058 object_err(s, page, object, "Object already free");
Christoph Lameter81819f02007-05-06 14:49:36 -07001059 goto fail;
1060 }
1061
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001062 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001063 goto out;
Christoph Lameter81819f02007-05-06 14:49:36 -07001064
1065 if (unlikely(s != page->slab)) {
Ingo Molnar3adbefe2008-02-05 17:57:39 -08001066 if (!PageSlab(page)) {
Christoph Lameter70d71222007-05-06 14:49:47 -07001067 slab_err(s, page, "Attempt to free object(0x%p) "
1068 "outside of slab", object);
Ingo Molnar3adbefe2008-02-05 17:57:39 -08001069 } else if (!page->slab) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001070 printk(KERN_ERR
Christoph Lameter70d71222007-05-06 14:49:47 -07001071 "SLUB <none>: no slab for object 0x%p.\n",
Christoph Lameter81819f02007-05-06 14:49:36 -07001072 object);
Christoph Lameter70d71222007-05-06 14:49:47 -07001073 dump_stack();
Pekka Enberg06428782008-01-07 23:20:27 -08001074 } else
Christoph Lameter24922682007-07-17 04:03:18 -07001075 object_err(s, page, object,
1076 "page slab pointer corrupt.");
Christoph Lameter81819f02007-05-06 14:49:36 -07001077 goto fail;
1078 }
Christoph Lameter3ec09742007-05-16 22:11:00 -07001079
Christoph Lameter3ec09742007-05-16 22:11:00 -07001080 if (s->flags & SLAB_STORE_USER)
1081 set_track(s, object, TRACK_FREE, addr);
1082 trace(s, page, object, 0);
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001083 init_object(s, object, SLUB_RED_INACTIVE);
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001084 rc = 1;
1085out:
Christoph Lameter881db7f2011-06-01 12:25:53 -05001086 slab_unlock(page);
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001087 local_irq_restore(flags);
1088 return rc;
Christoph Lameter3ec09742007-05-16 22:11:00 -07001089
Christoph Lameter81819f02007-05-06 14:49:36 -07001090fail:
Christoph Lameter24922682007-07-17 04:03:18 -07001091 slab_fix(s, "Object at 0x%p not freed", object);
Christoph Lameter5c2e4bb2011-06-01 12:25:54 -05001092 goto out;
Christoph Lameter81819f02007-05-06 14:49:36 -07001093}
1094
Christoph Lameter41ecc552007-05-09 02:32:44 -07001095static int __init setup_slub_debug(char *str)
1096{
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001097 slub_debug = DEBUG_DEFAULT_FLAGS;
1098 if (*str++ != '=' || !*str)
1099 /*
1100 * No options specified. Switch on full debugging.
1101 */
1102 goto out;
Christoph Lameter41ecc552007-05-09 02:32:44 -07001103
1104 if (*str == ',')
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001105 /*
1106 * No options but restriction on slabs. This means full
1107 * debugging for slabs matching a pattern.
1108 */
1109 goto check_slabs;
1110
David Rientjesfa5ec8a2009-07-07 00:14:14 -07001111 if (tolower(*str) == 'o') {
1112 /*
 1113 * Avoid enabling debugging on caches if their minimum order
1114 * would increase as a result.
1115 */
1116 disable_higher_order_debug = 1;
1117 goto out;
1118 }
1119
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001120 slub_debug = 0;
1121 if (*str == '-')
1122 /*
1123 * Switch off all debugging measures.
1124 */
1125 goto out;
1126
1127 /*
1128 * Determine which debug features should be switched on
1129 */
Pekka Enberg06428782008-01-07 23:20:27 -08001130 for (; *str && *str != ','; str++) {
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001131 switch (tolower(*str)) {
1132 case 'f':
1133 slub_debug |= SLAB_DEBUG_FREE;
1134 break;
1135 case 'z':
1136 slub_debug |= SLAB_RED_ZONE;
1137 break;
1138 case 'p':
1139 slub_debug |= SLAB_POISON;
1140 break;
1141 case 'u':
1142 slub_debug |= SLAB_STORE_USER;
1143 break;
1144 case 't':
1145 slub_debug |= SLAB_TRACE;
1146 break;
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +03001147 case 'a':
1148 slub_debug |= SLAB_FAILSLAB;
1149 break;
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001150 default:
1151 printk(KERN_ERR "slub_debug option '%c' "
Pekka Enberg06428782008-01-07 23:20:27 -08001152 "unknown. skipped\n", *str);
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001153 }
1154 }
1155
1156check_slabs:
1157 if (*str == ',')
Christoph Lameter41ecc552007-05-09 02:32:44 -07001158 slub_debug_slabs = str + 1;
Christoph Lameterf0630ff2007-07-15 23:38:14 -07001159out:
Christoph Lameter41ecc552007-05-09 02:32:44 -07001160 return 1;
1161}
1162
1163__setup("slub_debug", setup_slub_debug);
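/*
 * Command line examples (illustrative): "slub_debug=FZP" enables sanity
 * checks, red zoning and poisoning for all caches; "slub_debug=U,kmalloc-64"
 * enables user tracking only for caches whose name starts with "kmalloc-64";
 * a bare "slub_debug" selects DEBUG_DEFAULT_FLAGS for everything.
 */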
1164
Christoph Lameterba0268a2007-09-11 15:24:11 -07001165static unsigned long kmem_cache_flags(unsigned long objsize,
1166 unsigned long flags, const char *name,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07001167 void (*ctor)(void *))
Christoph Lameter41ecc552007-05-09 02:32:44 -07001168{
1169 /*
Christoph Lametere1533622008-02-15 23:45:24 -08001170 * Enable debugging if selected on the kernel commandline.
Christoph Lameter41ecc552007-05-09 02:32:44 -07001171 */
Christoph Lametere1533622008-02-15 23:45:24 -08001172 if (slub_debug && (!slub_debug_slabs ||
David Rientjes3de47212009-07-27 18:30:35 -07001173 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1174 flags |= slub_debug;
Christoph Lameterba0268a2007-09-11 15:24:11 -07001175
1176 return flags;
Christoph Lameter41ecc552007-05-09 02:32:44 -07001177}
1178#else
Christoph Lameter3ec09742007-05-16 22:11:00 -07001179static inline void setup_object_debug(struct kmem_cache *s,
1180 struct page *page, void *object) {}
Christoph Lameter41ecc552007-05-09 02:32:44 -07001181
Christoph Lameter3ec09742007-05-16 22:11:00 -07001182static inline int alloc_debug_processing(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001183 struct page *page, void *object, unsigned long addr) { return 0; }
Christoph Lameter41ecc552007-05-09 02:32:44 -07001184
Christoph Lameter3ec09742007-05-16 22:11:00 -07001185static inline int free_debug_processing(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001186 struct page *page, void *object, unsigned long addr) { return 0; }
Christoph Lameter41ecc552007-05-09 02:32:44 -07001187
Christoph Lameter41ecc552007-05-09 02:32:44 -07001188static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1189 { return 1; }
1190static inline int check_object(struct kmem_cache *s, struct page *page,
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001191 void *object, u8 val) { return 1; }
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001192static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1193 struct page *page) {}
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001194static inline void remove_full(struct kmem_cache *s, struct page *page) {}
Christoph Lameterba0268a2007-09-11 15:24:11 -07001195static inline unsigned long kmem_cache_flags(unsigned long objsize,
1196 unsigned long flags, const char *name,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07001197 void (*ctor)(void *))
Christoph Lameterba0268a2007-09-11 15:24:11 -07001198{
1199 return flags;
1200}
Christoph Lameter41ecc552007-05-09 02:32:44 -07001201#define slub_debug 0
Christoph Lameter0f389ec2008-04-14 18:53:02 +03001202
Ingo Molnarfdaa45e2009-09-15 11:00:26 +02001203#define disable_higher_order_debug 0
1204
Christoph Lameter0f389ec2008-04-14 18:53:02 +03001205static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1206 { return 0; }
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04001207static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1208 { return 0; }
Christoph Lameter205ab992008-04-14 19:11:40 +03001209static inline void inc_slabs_node(struct kmem_cache *s, int node,
1210 int objects) {}
1211static inline void dec_slabs_node(struct kmem_cache *s, int node,
1212 int objects) {}
Christoph Lameter7d550c52010-08-25 14:07:16 -05001213
1214static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1215 { return 0; }
1216
1217static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1218 void *object) {}
1219
1220static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1221
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05001222#endif /* CONFIG_SLUB_DEBUG */
Christoph Lameter205ab992008-04-14 19:11:40 +03001223
Christoph Lameter81819f02007-05-06 14:49:36 -07001224/*
1225 * Slab allocation and freeing
1226 */
Christoph Lameter65c33762008-04-14 19:11:40 +03001227static inline struct page *alloc_slab_page(gfp_t flags, int node,
1228 struct kmem_cache_order_objects oo)
1229{
1230 int order = oo_order(oo);
1231
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001232 flags |= __GFP_NOTRACK;
1233
Christoph Lameter2154a332010-07-09 14:07:10 -05001234 if (node == NUMA_NO_NODE)
Christoph Lameter65c33762008-04-14 19:11:40 +03001235 return alloc_pages(flags, order);
1236 else
Minchan Kim6b65aaf2010-04-14 23:58:36 +09001237 return alloc_pages_exact_node(node, flags, order);
Christoph Lameter65c33762008-04-14 19:11:40 +03001238}
1239
Christoph Lameter81819f02007-05-06 14:49:36 -07001240static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1241{
Pekka Enberg06428782008-01-07 23:20:27 -08001242 struct page *page;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001243 struct kmem_cache_order_objects oo = s->oo;
Pekka Enbergba522702009-06-24 21:59:51 +03001244 gfp_t alloc_gfp;
Christoph Lameter81819f02007-05-06 14:49:36 -07001245
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001246 flags &= gfp_allowed_mask;
1247
1248 if (flags & __GFP_WAIT)
1249 local_irq_enable();
1250
Christoph Lameterb7a49f02008-02-14 14:21:32 -08001251 flags |= s->allocflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001252
Pekka Enbergba522702009-06-24 21:59:51 +03001253 /*
1254 * Let the initial higher-order allocation fail under memory pressure
1255	 * so we fall back to the minimum order allocation.
1256 */
1257 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1258
1259 page = alloc_slab_page(alloc_gfp, node, oo);
Christoph Lameter65c33762008-04-14 19:11:40 +03001260 if (unlikely(!page)) {
1261 oo = s->min;
1262 /*
1263 * Allocation may have failed due to fragmentation.
1264 * Try a lower order alloc if possible
1265 */
1266 page = alloc_slab_page(flags, node, oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001267
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001268 if (page)
1269 stat(s, ORDER_FALLBACK);
Christoph Lameter65c33762008-04-14 19:11:40 +03001270 }
Vegard Nossum5a896d92008-04-04 00:54:48 +02001271
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001272 if (flags & __GFP_WAIT)
1273 local_irq_disable();
1274
1275 if (!page)
1276 return NULL;
1277
Vegard Nossum5a896d92008-04-04 00:54:48 +02001278 if (kmemcheck_enabled
Amerigo Wang5086c389c2009-08-19 21:44:13 +03001279 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001280 int pages = 1 << oo_order(oo);
1281
1282 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1283
1284 /*
1285 * Objects from caches that have a constructor don't get
1286 * cleared when they're allocated, so we need to do it here.
1287 */
1288 if (s->ctor)
1289 kmemcheck_mark_uninitialized_pages(page, pages);
1290 else
1291 kmemcheck_mark_unallocated_pages(page, pages);
Vegard Nossum5a896d92008-04-04 00:54:48 +02001292 }
1293
Christoph Lameter834f3d12008-04-14 19:11:31 +03001294 page->objects = oo_objects(oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001295 mod_zone_page_state(page_zone(page),
1296 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1297 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Christoph Lameter65c33762008-04-14 19:11:40 +03001298 1 << oo_order(oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07001299
1300 return page;
1301}
1302
1303static void setup_object(struct kmem_cache *s, struct page *page,
1304 void *object)
1305{
Christoph Lameter3ec09742007-05-16 22:11:00 -07001306 setup_object_debug(s, page, object);
Christoph Lameter4f104932007-05-06 14:50:17 -07001307 if (unlikely(s->ctor))
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07001308 s->ctor(object);
Christoph Lameter81819f02007-05-06 14:49:36 -07001309}
1310
1311static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1312{
1313 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07001314 void *start;
Christoph Lameter81819f02007-05-06 14:49:36 -07001315 void *last;
1316 void *p;
1317
Christoph Lameter6cb06222007-10-16 01:25:41 -07001318 BUG_ON(flags & GFP_SLAB_BUG_MASK);
Christoph Lameter81819f02007-05-06 14:49:36 -07001319
Christoph Lameter6cb06222007-10-16 01:25:41 -07001320 page = allocate_slab(s,
1321 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
Christoph Lameter81819f02007-05-06 14:49:36 -07001322 if (!page)
1323 goto out;
1324
Christoph Lameter205ab992008-04-14 19:11:40 +03001325 inc_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001326 page->slab = s;
1327 page->flags |= 1 << PG_slab;
Christoph Lameter81819f02007-05-06 14:49:36 -07001328
1329 start = page_address(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001330
1331 if (unlikely(s->flags & SLAB_POISON))
Christoph Lameter834f3d12008-04-14 19:11:31 +03001332 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
Christoph Lameter81819f02007-05-06 14:49:36 -07001333
1334 last = start;
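	/*
	 * Thread the freelist through every object in the slab, running any
	 * constructor via setup_object(); the last free pointer is set to
	 * NULL to terminate the chain.
	 */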
Christoph Lameter224a88b2008-04-14 19:11:31 +03001335 for_each_object(p, s, start, page->objects) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001336 setup_object(s, page, last);
1337 set_freepointer(s, last, p);
1338 last = p;
1339 }
1340 setup_object(s, page, last);
Christoph Lametera973e9d2008-03-01 13:40:44 -08001341 set_freepointer(s, last, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07001342
1343 page->freelist = start;
1344 page->inuse = 0;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05001345 page->frozen = 1;
Christoph Lameter81819f02007-05-06 14:49:36 -07001346out:
Christoph Lameter81819f02007-05-06 14:49:36 -07001347 return page;
1348}
1349
1350static void __free_slab(struct kmem_cache *s, struct page *page)
1351{
Christoph Lameter834f3d12008-04-14 19:11:31 +03001352 int order = compound_order(page);
1353 int pages = 1 << order;
Christoph Lameter81819f02007-05-06 14:49:36 -07001354
Christoph Lameteraf537b02010-07-09 14:07:14 -05001355 if (kmem_cache_debug(s)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001356 void *p;
1357
1358 slab_pad_check(s, page);
Christoph Lameter224a88b2008-04-14 19:11:31 +03001359 for_each_object(p, s, page_address(page),
1360 page->objects)
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001361 check_object(s, page, p, SLUB_RED_INACTIVE);
Christoph Lameter81819f02007-05-06 14:49:36 -07001362 }
1363
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001364 kmemcheck_free_shadow(page, compound_order(page));
Vegard Nossum5a896d92008-04-04 00:54:48 +02001365
Christoph Lameter81819f02007-05-06 14:49:36 -07001366 mod_zone_page_state(page_zone(page),
1367 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1368 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Pekka Enberg06428782008-01-07 23:20:27 -08001369 -pages);
Christoph Lameter81819f02007-05-06 14:49:36 -07001370
Christoph Lameter49bd5222008-04-14 18:52:18 +03001371 __ClearPageSlab(page);
1372 reset_page_mapcount(page);
Nick Piggin1eb5ac62009-05-05 19:13:44 +10001373 if (current->reclaim_state)
1374 current->reclaim_state->reclaimed_slab += pages;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001375 __free_pages(page, order);
Christoph Lameter81819f02007-05-06 14:49:36 -07001376}
1377
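/*
 * For SLAB_DESTROY_BY_RCU caches the rcu_head used to defer the free is
 * normally overlaid on page->lru. If it does not fit there, space for it
 * is reserved at the end of each slab (s->reserved) instead.
 */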
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001378#define need_reserve_slab_rcu \
1379 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1380
Christoph Lameter81819f02007-05-06 14:49:36 -07001381static void rcu_free_slab(struct rcu_head *h)
1382{
1383 struct page *page;
1384
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001385 if (need_reserve_slab_rcu)
1386 page = virt_to_head_page(h);
1387 else
1388 page = container_of((struct list_head *)h, struct page, lru);
1389
Christoph Lameter81819f02007-05-06 14:49:36 -07001390 __free_slab(page->slab, page);
1391}
1392
1393static void free_slab(struct kmem_cache *s, struct page *page)
1394{
1395 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001396 struct rcu_head *head;
1397
1398 if (need_reserve_slab_rcu) {
1399 int order = compound_order(page);
1400 int offset = (PAGE_SIZE << order) - s->reserved;
1401
1402 VM_BUG_ON(s->reserved != sizeof(*head));
1403 head = page_address(page) + offset;
1404 } else {
1405 /*
1406 * RCU free overloads the RCU head over the LRU
1407 */
1408 head = (void *)&page->lru;
1409 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001410
1411 call_rcu(head, rcu_free_slab);
1412 } else
1413 __free_slab(s, page);
1414}
1415
1416static void discard_slab(struct kmem_cache *s, struct page *page)
1417{
Christoph Lameter205ab992008-04-14 19:11:40 +03001418 dec_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001419 free_slab(s, page);
1420}
1421
1422/*
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001423 * Management of partially allocated slabs.
1424 *
1425 * list_lock must be held.
Christoph Lameter81819f02007-05-06 14:49:36 -07001426 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001427static inline void add_partial(struct kmem_cache_node *n,
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001428 struct page *page, int tail)
Christoph Lameter81819f02007-05-06 14:49:36 -07001429{
Christoph Lametere95eed52007-05-06 14:49:44 -07001430 n->nr_partial++;
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001431 if (tail)
1432 list_add_tail(&page->lru, &n->partial);
1433 else
1434 list_add(&page->lru, &n->partial);
Christoph Lameter81819f02007-05-06 14:49:36 -07001435}
1436
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001437/*
1438 * list_lock must be held.
1439 */
1440static inline void remove_partial(struct kmem_cache_node *n,
Christoph Lameter62e346a2010-09-28 08:10:28 -05001441 struct page *page)
1442{
1443 list_del(&page->lru);
1444 n->nr_partial--;
1445}
1446
Christoph Lameter81819f02007-05-06 14:49:36 -07001447/*
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001448 * Lock slab, remove from the partial list and put the object into the
1449 * per cpu freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07001450 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001451 * Must hold list_lock.
Christoph Lameter81819f02007-05-06 14:49:36 -07001452 */
Christoph Lameter881db7f2011-06-01 12:25:53 -05001453static inline int acquire_slab(struct kmem_cache *s,
Christoph Lameter61728d12011-06-01 12:25:51 -05001454 struct kmem_cache_node *n, struct page *page)
Christoph Lameter81819f02007-05-06 14:49:36 -07001455{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001456 void *freelist;
1457 unsigned long counters;
1458 struct page new;
1459
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001460 /*
1461 * Zap the freelist and set the frozen bit.
1462 * The old freelist is the list of objects for the
1463 * per cpu allocation list.
1464 */
1465 do {
1466 freelist = page->freelist;
1467 counters = page->counters;
1468 new.counters = counters;
1469 new.inuse = page->objects;
1470
1471 VM_BUG_ON(new.frozen);
1472 new.frozen = 1;
1473
1474 } while (!cmpxchg_double_slab(s, page,
1475 freelist, counters,
1476 NULL, new.counters,
1477 "lock and freeze"));
1478
1479 remove_partial(n, page);
1480
1481 if (freelist) {
1482 /* Populate the per cpu freelist */
1483 this_cpu_write(s->cpu_slab->freelist, freelist);
1484 this_cpu_write(s->cpu_slab->page, page);
1485 this_cpu_write(s->cpu_slab->node, page_to_nid(page));
Christoph Lameter81819f02007-05-06 14:49:36 -07001486 return 1;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001487 } else {
1488 /*
1489 * Slab page came from the wrong list. No object to allocate
1490 * from. Put it onto the correct list and continue partial
1491 * scan.
1492 */
1493 printk(KERN_ERR "SLUB: %s : Page without available objects on"
1494 " partial list\n", s->name);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001495 return 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07001496 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001497}
1498
1499/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001500 * Try to allocate a partial slab from a specific node.
Christoph Lameter81819f02007-05-06 14:49:36 -07001501 */
Christoph Lameter61728d12011-06-01 12:25:51 -05001502static struct page *get_partial_node(struct kmem_cache *s,
1503 struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07001504{
1505 struct page *page;
1506
1507 /*
1508 * Racy check. If we mistakenly see no partial slabs then we
1509 * just allocate an empty slab. If we mistakenly try to get a
Christoph Lameter672bba32007-05-09 02:32:39 -07001510	 * partial slab and there is none available then get_partial_node()
1511 * will return NULL.
Christoph Lameter81819f02007-05-06 14:49:36 -07001512 */
1513 if (!n || !n->nr_partial)
1514 return NULL;
1515
1516 spin_lock(&n->list_lock);
1517 list_for_each_entry(page, &n->partial, lru)
Christoph Lameter881db7f2011-06-01 12:25:53 -05001518 if (acquire_slab(s, n, page))
Christoph Lameter81819f02007-05-06 14:49:36 -07001519 goto out;
1520 page = NULL;
1521out:
1522 spin_unlock(&n->list_lock);
1523 return page;
1524}
1525
1526/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001527 * Get a page from somewhere. Search in increasing NUMA distances.
Christoph Lameter81819f02007-05-06 14:49:36 -07001528 */
1529static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1530{
1531#ifdef CONFIG_NUMA
1532 struct zonelist *zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07001533 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07001534 struct zone *zone;
1535 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07001536 struct page *page;
1537
1538 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07001539 * The defrag ratio allows a configuration of the tradeoffs between
1540 * inter node defragmentation and node local allocations. A lower
1541 * defrag_ratio increases the tendency to do local allocations
1542 * instead of attempting to obtain partial slabs from other nodes.
Christoph Lameter81819f02007-05-06 14:49:36 -07001543 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001544 * If the defrag_ratio is set to 0 then kmalloc() always
1545 * returns node local objects. If the ratio is higher then kmalloc()
1546 * may return off node objects because partial slabs are obtained
1547 * from other nodes and filled up.
Christoph Lameter81819f02007-05-06 14:49:36 -07001548 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08001549 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
Christoph Lameter672bba32007-05-09 02:32:39 -07001550 * defrag_ratio = 1000) then every (well almost) allocation will
1551 * first attempt to defrag slab caches on other nodes. This means
1552 * scanning over all nodes to look for partial slabs which may be
1553 * expensive if we do it every time we are trying to find a slab
1554 * with available objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07001555 */
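	/*
	 * Illustrative numbers: a stored remote_node_defrag_ratio of 100
	 * (sysfs defrag_ratio of 10) lets only about 100 of every 1024 calls,
	 * roughly 10%, continue to the remote search, assuming the low bits
	 * of get_cycles() are evenly distributed.
	 */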
Christoph Lameter98246012008-01-07 23:20:26 -08001556 if (!s->remote_node_defrag_ratio ||
1557 get_cycles() % 1024 > s->remote_node_defrag_ratio)
Christoph Lameter81819f02007-05-06 14:49:36 -07001558 return NULL;
1559
Miao Xiec0ff7452010-05-24 14:32:08 -07001560 get_mems_allowed();
Mel Gorman0e884602008-04-28 02:12:14 -07001561 zonelist = node_zonelist(slab_node(current->mempolicy), flags);
Mel Gorman54a6eb52008-04-28 02:12:16 -07001562 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001563 struct kmem_cache_node *n;
1564
Mel Gorman54a6eb52008-04-28 02:12:16 -07001565 n = get_node(s, zone_to_nid(zone));
Christoph Lameter81819f02007-05-06 14:49:36 -07001566
Mel Gorman54a6eb52008-04-28 02:12:16 -07001567 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
David Rientjes3b89d7d2009-02-22 17:40:07 -08001568 n->nr_partial > s->min_partial) {
Christoph Lameter61728d12011-06-01 12:25:51 -05001569 page = get_partial_node(s, n);
Miao Xiec0ff7452010-05-24 14:32:08 -07001570 if (page) {
1571 put_mems_allowed();
Christoph Lameter81819f02007-05-06 14:49:36 -07001572 return page;
Miao Xiec0ff7452010-05-24 14:32:08 -07001573 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001574 }
1575 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001576 put_mems_allowed();
Christoph Lameter81819f02007-05-06 14:49:36 -07001577#endif
1578 return NULL;
1579}
1580
1581/*
1582 * Get a partial page, lock it and return it.
1583 */
1584static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1585{
1586 struct page *page;
Christoph Lameter2154a332010-07-09 14:07:10 -05001587 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
Christoph Lameter81819f02007-05-06 14:49:36 -07001588
Christoph Lameter61728d12011-06-01 12:25:51 -05001589 page = get_partial_node(s, get_node(s, searchnode));
Christoph Lameter33de04e2011-04-15 14:48:12 -05001590 if (page || node != NUMA_NO_NODE)
Christoph Lameter81819f02007-05-06 14:49:36 -07001591 return page;
1592
1593 return get_any_partial(s, flags);
1594}
1595
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001596#ifdef CONFIG_PREEMPT
1597/*
1598 * Calculate the next globally unique transaction for disambiguation
1599 * during cmpxchg. The transactions start with the cpu number and are then
1600 * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
1601 */
1602#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1603#else
1604/*
1605 * No preemption supported, therefore there is also no need to check for
1606 * different cpus.
1607 */
1608#define TID_STEP 1
1609#endif
1610
1611static inline unsigned long next_tid(unsigned long tid)
1612{
1613 return tid + TID_STEP;
1614}
1615
1616static inline unsigned int tid_to_cpu(unsigned long tid)
1617{
1618 return tid % TID_STEP;
1619}
1620
1621static inline unsigned long tid_to_event(unsigned long tid)
1622{
1623 return tid / TID_STEP;
1624}
1625
1626static inline unsigned int init_tid(int cpu)
1627{
1628 return cpu;
1629}
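/*
 * Illustrative example: with CONFIG_NR_CPUS = 6 on a preemptible kernel,
 * TID_STEP is rounded up to 8. CPU 2 then cycles through the tids
 * 2, 10, 18, ... so tid_to_cpu() recovers the issuing cpu from tid % 8 and
 * tid_to_event() counts that cpu's operations as tid / 8.
 */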
1630
1631static inline void note_cmpxchg_failure(const char *n,
1632 const struct kmem_cache *s, unsigned long tid)
1633{
1634#ifdef SLUB_DEBUG_CMPXCHG
1635 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1636
1637 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1638
1639#ifdef CONFIG_PREEMPT
1640 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1641 printk("due to cpu change %d -> %d\n",
1642 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1643 else
1644#endif
1645 if (tid_to_event(tid) != tid_to_event(actual_tid))
1646 printk("due to cpu running other code. Event %ld->%ld\n",
1647 tid_to_event(tid), tid_to_event(actual_tid));
1648 else
1649 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1650 actual_tid, tid, next_tid(tid));
1651#endif
Christoph Lameter4fdccdf2011-03-22 13:35:00 -05001652 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001653}
1654
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001655void init_kmem_cache_cpus(struct kmem_cache *s)
1656{
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001657 int cpu;
1658
1659 for_each_possible_cpu(cpu)
1660 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001661}
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001665
1666/*
1667 * Remove the cpu slab
1668 */
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001669static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001670{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001671 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001672 struct page *page = c->page;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001673 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1674 int lock = 0;
1675 enum slab_modes l = M_NONE, m = M_NONE;
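	/* l: the list the page is on now, m: the list it should be moved to */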
1676 void *freelist;
1677 void *nextfree;
1678 int tail = 0;
1679 struct page new;
1680 struct page old;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001681
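	/*
	 * If objects were freed back to this page while it was the cpu slab,
	 * queue it at the tail of the partial list later on.
	 */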
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001682 if (page->freelist) {
Christoph Lameter84e554e62009-12-18 16:26:23 -06001683 stat(s, DEACTIVATE_REMOTE_FREES);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001684 tail = 1;
Christoph Lameter894b8782007-05-10 03:15:16 -07001685 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001686
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001687 c->tid = next_tid(c->tid);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001688 c->page = NULL;
1689 freelist = c->freelist;
1690 c->freelist = NULL;
1691
1692 /*
1693 * Stage one: Free all available per cpu objects back
1694 * to the page freelist while it is still frozen. Leave the
1695 * last one.
1696 *
1697	 * There is no need to take the list_lock because the page
1698 * is still frozen.
1699 */
1700 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1701 void *prior;
1702 unsigned long counters;
1703
1704 do {
1705 prior = page->freelist;
1706 counters = page->counters;
1707 set_freepointer(s, freelist, prior);
1708 new.counters = counters;
1709 new.inuse--;
1710 VM_BUG_ON(!new.frozen);
1711
1712 } while (!cmpxchg_double_slab(s, page,
1713 prior, counters,
1714 freelist, new.counters,
1715 "drain percpu freelist"));
1716
1717 freelist = nextfree;
1718 }
1719
1720 /*
1721 * Stage two: Ensure that the page is unfrozen while the
1722 * list presence reflects the actual number of objects
1723 * during unfreeze.
1724 *
1725 * We setup the list membership and then perform a cmpxchg
1726 * with the count. If there is a mismatch then the page
1727 * is not unfrozen but the page is on the wrong list.
1728 *
1729 * Then we restart the process which may have to remove
1730 * the page from the list that we just put it on again
1731 * because the number of objects in the slab may have
1732 * changed.
1733 */
1734redo:
1735
1736 old.freelist = page->freelist;
1737 old.counters = page->counters;
1738 VM_BUG_ON(!old.frozen);
1739
1740 /* Determine target state of the slab */
1741 new.counters = old.counters;
1742 if (freelist) {
1743 new.inuse--;
1744 set_freepointer(s, freelist, old.freelist);
1745 new.freelist = freelist;
1746 } else
1747 new.freelist = old.freelist;
1748
1749 new.frozen = 0;
1750
1751 if (!new.inuse && n->nr_partial < s->min_partial)
1752 m = M_FREE;
1753 else if (new.freelist) {
1754 m = M_PARTIAL;
1755 if (!lock) {
1756 lock = 1;
1757 /*
1758			 * Taking the spinlock removes the possibility
1759 * that acquire_slab() will see a slab page that
1760 * is frozen
1761 */
1762 spin_lock(&n->list_lock);
1763 }
1764 } else {
1765 m = M_FULL;
1766 if (kmem_cache_debug(s) && !lock) {
1767 lock = 1;
1768 /*
1769 * This also ensures that the scanning of full
1770 * slabs from diagnostic functions will not see
1771 * any frozen slabs.
1772 */
1773 spin_lock(&n->list_lock);
1774 }
1775 }
1776
1777 if (l != m) {
1778
1779 if (l == M_PARTIAL)
1780
1781 remove_partial(n, page);
1782
1783 else if (l == M_FULL)
1784
1785 remove_full(s, page);
1786
1787 if (m == M_PARTIAL) {
1788
1789 add_partial(n, page, tail);
1790 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1791
1792 } else if (m == M_FULL) {
1793
1794 stat(s, DEACTIVATE_FULL);
1795 add_full(s, n, page);
1796
1797 }
1798 }
1799
1800 l = m;
1801 if (!cmpxchg_double_slab(s, page,
1802 old.freelist, old.counters,
1803 new.freelist, new.counters,
1804 "unfreezing slab"))
1805 goto redo;
1806
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001807 if (lock)
1808 spin_unlock(&n->list_lock);
1809
1810 if (m == M_FREE) {
1811 stat(s, DEACTIVATE_EMPTY);
1812 discard_slab(s, page);
1813 stat(s, FREE_SLAB);
1814 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001815}
1816
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001817static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001818{
Christoph Lameter84e554e62009-12-18 16:26:23 -06001819 stat(s, CPUSLAB_FLUSH);
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001820 deactivate_slab(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001821}
1822
1823/*
1824 * Flush cpu slab.
Christoph Lameter6446faa2008-02-15 23:45:26 -08001825 *
Christoph Lameter81819f02007-05-06 14:49:36 -07001826 * Called from IPI handler with interrupts disabled.
1827 */
Christoph Lameter0c710012007-07-17 04:03:24 -07001828static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
Christoph Lameter81819f02007-05-06 14:49:36 -07001829{
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001830 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameter81819f02007-05-06 14:49:36 -07001831
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001832 if (likely(c && c->page))
1833 flush_slab(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001834}
1835
1836static void flush_cpu_slab(void *d)
1837{
1838 struct kmem_cache *s = d;
Christoph Lameter81819f02007-05-06 14:49:36 -07001839
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001840 __flush_cpu_slab(s, smp_processor_id());
Christoph Lameter81819f02007-05-06 14:49:36 -07001841}
1842
1843static void flush_all(struct kmem_cache *s)
1844{
Jens Axboe15c8b6c2008-05-09 09:39:44 +02001845 on_each_cpu(flush_cpu_slab, s, 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07001846}
1847
1848/*
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001849 * Check if the objects in a per cpu structure fit numa
1850 * locality expectations.
1851 */
1852static inline int node_match(struct kmem_cache_cpu *c, int node)
1853{
1854#ifdef CONFIG_NUMA
Christoph Lameter2154a332010-07-09 14:07:10 -05001855 if (node != NUMA_NO_NODE && c->node != node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001856 return 0;
1857#endif
1858 return 1;
1859}
1860
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001861static int count_free(struct page *page)
1862{
1863 return page->objects - page->inuse;
1864}
1865
1866static unsigned long count_partial(struct kmem_cache_node *n,
1867 int (*get_count)(struct page *))
1868{
1869 unsigned long flags;
1870 unsigned long x = 0;
1871 struct page *page;
1872
1873 spin_lock_irqsave(&n->list_lock, flags);
1874 list_for_each_entry(page, &n->partial, lru)
1875 x += get_count(page);
1876 spin_unlock_irqrestore(&n->list_lock, flags);
1877 return x;
1878}
1879
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04001880static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
1881{
1882#ifdef CONFIG_SLUB_DEBUG
1883 return atomic_long_read(&n->total_objects);
1884#else
1885 return 0;
1886#endif
1887}
1888
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001889static noinline void
1890slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
1891{
1892 int node;
1893
1894 printk(KERN_WARNING
1895 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1896 nid, gfpflags);
1897 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
1898 "default order: %d, min order: %d\n", s->name, s->objsize,
1899 s->size, oo_order(s->oo), oo_order(s->min));
1900
David Rientjesfa5ec8a2009-07-07 00:14:14 -07001901 if (oo_order(s->min) > get_order(s->objsize))
1902 printk(KERN_WARNING " %s debugging increased min order, use "
1903 "slub_debug=O to disable.\n", s->name);
1904
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001905 for_each_online_node(node) {
1906 struct kmem_cache_node *n = get_node(s, node);
1907 unsigned long nr_slabs;
1908 unsigned long nr_objs;
1909 unsigned long nr_free;
1910
1911 if (!n)
1912 continue;
1913
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04001914 nr_free = count_partial(n, count_free);
1915 nr_slabs = node_nr_slabs(n);
1916 nr_objs = node_nr_objs(n);
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001917
1918 printk(KERN_WARNING
1919 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
1920 node, nr_slabs, nr_objs, nr_free);
1921 }
1922}
1923
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001924/*
Christoph Lameter894b8782007-05-10 03:15:16 -07001925 * Slow path. The lockless freelist is empty or we need to perform
1926 * debugging duties.
Christoph Lameter81819f02007-05-06 14:49:36 -07001927 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001928 * Interrupts are disabled.
Christoph Lameter81819f02007-05-06 14:49:36 -07001929 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001930 * Processing is still very fast if new objects have been freed to the
1931 * regular freelist. In that case we simply take over the regular freelist
1932 * as the lockless freelist and zap the regular freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07001933 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001934 * If that is not working then we fall back to the partial lists. We take the
1935 * first element of the freelist as the object to allocate now and move the
1936 * rest of the freelist to the lockless freelist.
1937 *
1938 * And if we were unable to get a new slab from the partial slab lists then
Christoph Lameter6446faa2008-02-15 23:45:26 -08001939 * we need to allocate a new slab. This is the slowest path since it involves
1940 * a call to the page allocator and the setup of a new slab.
Christoph Lameter81819f02007-05-06 14:49:36 -07001941 */
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001942static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1943 unsigned long addr, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001944{
Christoph Lameter81819f02007-05-06 14:49:36 -07001945 void **object;
Christoph Lameter01ad8a72011-04-15 14:48:14 -05001946 struct page *page;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001947 unsigned long flags;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001948 struct page new;
1949 unsigned long counters;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001950
1951 local_irq_save(flags);
1952#ifdef CONFIG_PREEMPT
1953 /*
1954 * We may have been preempted and rescheduled on a different
1955 * cpu before disabling interrupts. Need to reload cpu area
1956 * pointer.
1957 */
1958 c = this_cpu_ptr(s->cpu_slab);
1959#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07001960
Linus Torvaldse72e9c22008-03-27 20:56:33 -07001961 /* We handle __GFP_ZERO in the caller */
1962 gfpflags &= ~__GFP_ZERO;
1963
Christoph Lameter01ad8a72011-04-15 14:48:14 -05001964 page = c->page;
1965 if (!page)
Christoph Lameter81819f02007-05-06 14:49:36 -07001966 goto new_slab;
1967
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001968 if (unlikely(!node_match(c, node)))
Christoph Lameter81819f02007-05-06 14:49:36 -07001969 goto another_slab;
Christoph Lameter6446faa2008-02-15 23:45:26 -08001970
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001971 stat(s, ALLOC_SLOWPATH);
1972
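	/*
	 * Grab the entire remaining page freelist (objects freed back to the
	 * page while it was the cpu slab) in a single cmpxchg. The page stays
	 * frozen and the objects refill the lockless freelist below.
	 */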
1973 do {
1974 object = page->freelist;
1975 counters = page->counters;
1976 new.counters = counters;
1977 new.inuse = page->objects;
1978 VM_BUG_ON(!new.frozen);
1979
1980 } while (!cmpxchg_double_slab(s, page,
1981 object, counters,
1982 NULL, new.counters,
1983 "__slab_alloc"));
Christoph Lameter6446faa2008-02-15 23:45:26 -08001984
Christoph Lameter894b8782007-05-10 03:15:16 -07001985load_freelist:
Christoph Lameter8cb0a502011-06-01 12:25:46 -05001986 VM_BUG_ON(!page->frozen);
1987
Christoph Lametera973e9d2008-03-01 13:40:44 -08001988 if (unlikely(!object))
Christoph Lameter81819f02007-05-06 14:49:36 -07001989 goto another_slab;
Christoph Lameter81819f02007-05-06 14:49:36 -07001990
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001991 stat(s, ALLOC_REFILL);
Christoph Lameter01ad8a72011-04-15 14:48:14 -05001992
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001993 c->freelist = get_freepointer(s, object);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001994 c->tid = next_tid(c->tid);
1995 local_irq_restore(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07001996 return object;
1997
1998another_slab:
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001999 deactivate_slab(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07002000
2001new_slab:
Christoph Lameter01ad8a72011-04-15 14:48:14 -05002002 page = get_partial(s, gfpflags, node);
2003 if (page) {
Christoph Lameter84e554e62009-12-18 16:26:23 -06002004 stat(s, ALLOC_FROM_PARTIAL);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002005 object = c->freelist;
2006
2007 if (kmem_cache_debug(s))
2008 goto debug;
Christoph Lameter894b8782007-05-10 03:15:16 -07002009 goto load_freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07002010 }
2011
Christoph Lameter01ad8a72011-04-15 14:48:14 -05002012 page = new_slab(s, gfpflags, node);
Christoph Lameterb811c202007-10-16 23:25:51 -07002013
Christoph Lameter01ad8a72011-04-15 14:48:14 -05002014 if (page) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002015 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameter05aa3452007-11-05 11:31:58 -08002016 if (c->page)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002017 flush_slab(s, c);
Christoph Lameter01ad8a72011-04-15 14:48:14 -05002018
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002019 /*
2020 * No other reference to the page yet so we can
2021 * muck around with it freely without cmpxchg
2022 */
2023 object = page->freelist;
2024 page->freelist = NULL;
2025 page->inuse = page->objects;
2026
2027 stat(s, ALLOC_SLAB);
David Rientjesbd07d872011-05-12 13:10:49 -07002028 c->node = page_to_nid(page);
2029 c->page = page;
Christoph Lameter4b6f0752007-05-16 22:10:53 -07002030 goto load_freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07002031 }
Pekka Enberg95f85982009-06-11 16:18:09 +03002032 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2033 slab_out_of_memory(s, gfpflags, node);
Christoph Lameter2fd66c52011-03-22 13:32:53 -05002034 local_irq_restore(flags);
Christoph Lameter71c7a062008-02-14 14:28:01 -08002035 return NULL;
Christoph Lameter894b8782007-05-10 03:15:16 -07002036
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002037debug:
2038 if (!object || !alloc_debug_processing(s, page, object, addr))
2039 goto new_slab;
2040
2041 c->freelist = get_freepointer(s, object);
Christoph Lameter442b06b2011-05-17 16:29:31 -05002042 deactivate_slab(s, c);
2043 c->page = NULL;
Pekka Enberg15b7c512010-10-02 11:32:32 +03002044 c->node = NUMA_NO_NODE;
Christoph Lametera71ae472011-05-25 09:47:43 -05002045 local_irq_restore(flags);
2046 return object;
Christoph Lameter894b8782007-05-10 03:15:16 -07002047}
2048
2049/*
2050 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2051 * have the fastpath folded into their functions. So no function call
2052 * overhead for requests that can be satisfied on the fastpath.
2053 *
2054 * The fastpath works by first checking if the lockless freelist can be used.
2055 * If not then __slab_alloc is called for slow processing.
2056 *
2057 * Otherwise we can simply pick the next object from the lockless free list.
2058 */
Pekka Enberg06428782008-01-07 23:20:27 -08002059static __always_inline void *slab_alloc(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002060 gfp_t gfpflags, int node, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07002061{
Christoph Lameter894b8782007-05-10 03:15:16 -07002062 void **object;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002063 struct kmem_cache_cpu *c;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002064 unsigned long tid;
Christoph Lameter1f842602008-01-07 23:20:30 -08002065
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002066 if (slab_pre_alloc_hook(s, gfpflags))
Akinobu Mita773ff602008-12-23 19:37:01 +09002067 return NULL;
2068
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002069redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002070
2071 /*
2072 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2073 * enabled. We may switch back and forth between cpus while
2074 * reading from one cpu area. That does not matter as long
2075 * as we end up on the original cpu again when doing the cmpxchg.
2076 */
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002077 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002078
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002079 /*
2080 * The transaction ids are globally unique per cpu and per operation on
2081	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2082 * occurs on the right processor and that there was no operation on the
2083 * linked list in between.
2084 */
2085 tid = c->tid;
2086 barrier();
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002087
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002088 object = c->freelist;
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002089 if (unlikely(!object || !node_match(c, node)))
Christoph Lameter894b8782007-05-10 03:15:16 -07002090
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002091 object = __slab_alloc(s, gfpflags, node, addr, c);
Christoph Lameter894b8782007-05-10 03:15:16 -07002092
2093 else {
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002094 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002095 * The cmpxchg will only match if there was no additional
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002096 * operation and if we are on the right processor.
2097 *
2098 * The cmpxchg does the following atomically (without lock semantics!)
2099 * 1. Relocate first pointer to the current per cpu area.
2100 * 2. Verify that tid and freelist have not been changed
2101 * 3. If they were not changed replace tid and freelist
2102 *
2103 * Since this is without lock semantics the protection is only against
2104 * code executing on this cpu *not* from access by other cpus.
2105 */
Thomas Gleixner30106b82011-05-04 15:38:19 +02002106 if (unlikely(!irqsafe_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002107 s->cpu_slab->freelist, s->cpu_slab->tid,
2108 object, tid,
Christoph Lameter1393d9a2011-05-16 15:26:08 -05002109 get_freepointer_safe(s, object), next_tid(tid)))) {
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002110
2111 note_cmpxchg_failure("slab_alloc", s, tid);
2112 goto redo;
2113 }
Christoph Lameter84e554e62009-12-18 16:26:23 -06002114 stat(s, ALLOC_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07002115 }
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002116
Pekka Enberg74e21342009-11-25 20:14:48 +02002117 if (unlikely(gfpflags & __GFP_ZERO) && object)
Christoph Lameterff120592009-12-18 16:26:22 -06002118 memset(object, 0, s->objsize);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07002119
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002120 slab_post_alloc_hook(s, gfpflags, object);
Vegard Nossum5a896d92008-04-04 00:54:48 +02002121
Christoph Lameter894b8782007-05-10 03:15:16 -07002122 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07002123}
2124
2125void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2126{
Christoph Lameter2154a332010-07-09 14:07:10 -05002127 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002128
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002129 trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002130
2131 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002132}
2133EXPORT_SYMBOL(kmem_cache_alloc);
2134
Li Zefan0f24f122009-12-11 15:45:30 +08002135#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01002136void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002137{
Richard Kennedy4a923792010-10-21 10:29:19 +01002138 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2139 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2140 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002141}
Richard Kennedy4a923792010-10-21 10:29:19 +01002142EXPORT_SYMBOL(kmem_cache_alloc_trace);
2143
2144void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2145{
2146 void *ret = kmalloc_order(size, flags, order);
2147 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2148 return ret;
2149}
2150EXPORT_SYMBOL(kmalloc_order_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002151#endif
2152
Christoph Lameter81819f02007-05-06 14:49:36 -07002153#ifdef CONFIG_NUMA
2154void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2155{
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002156 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2157
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002158 trace_kmem_cache_alloc_node(_RET_IP_, ret,
2159 s->objsize, s->size, gfpflags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002160
2161 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002162}
2163EXPORT_SYMBOL(kmem_cache_alloc_node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002164
Li Zefan0f24f122009-12-11 15:45:30 +08002165#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01002166void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002167 gfp_t gfpflags,
Richard Kennedy4a923792010-10-21 10:29:19 +01002168 int node, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002169{
Richard Kennedy4a923792010-10-21 10:29:19 +01002170 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2171
2172 trace_kmalloc_node(_RET_IP_, ret,
2173 size, s->size, gfpflags, node);
2174 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002175}
Richard Kennedy4a923792010-10-21 10:29:19 +01002176EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002177#endif
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09002178#endif
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002179
Christoph Lameter81819f02007-05-06 14:49:36 -07002180/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002181 * Slow path handling. This may still be called frequently since objects
2182 * have a longer lifetime than the cpu slabs in most processing loads.
Christoph Lameter81819f02007-05-06 14:49:36 -07002183 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002184 * So we still attempt to reduce cache line usage. Just take the slab
2185 * lock and free the item. If there is no additional partial page
2186 * handling required then we can return immediately.
Christoph Lameter81819f02007-05-06 14:49:36 -07002187 */
Christoph Lameter894b8782007-05-10 03:15:16 -07002188static void __slab_free(struct kmem_cache *s, struct page *page,
Christoph Lameterff120592009-12-18 16:26:22 -06002189 void *x, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07002190{
2191 void *prior;
2192 void **object = (void *)x;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002193 int was_frozen;
2194 int inuse;
2195 struct page new;
2196 unsigned long counters;
2197 struct kmem_cache_node *n = NULL;
Christoph Lameter61728d12011-06-01 12:25:51 -05002198 unsigned long uninitialized_var(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002199
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002200 stat(s, FREE_SLOWPATH);
Christoph Lameter81819f02007-05-06 14:49:36 -07002201
Christoph Lameter8dc16c62011-04-15 14:48:16 -05002202 if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
Christoph Lameter80f08c12011-06-01 12:25:55 -05002203 return;
Christoph Lameter6446faa2008-02-15 23:45:26 -08002204
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002205 do {
2206 prior = page->freelist;
2207 counters = page->counters;
2208 set_freepointer(s, object, prior);
2209 new.counters = counters;
2210 was_frozen = new.frozen;
2211 new.inuse--;
2212 if ((!new.inuse || !prior) && !was_frozen && !n) {
2213 n = get_node(s, page_to_nid(page));
2214 /*
2215 * Speculatively acquire the list_lock.
2216 * If the cmpxchg does not succeed then we may
2217 * drop the list_lock without any processing.
2218 *
2219 * Otherwise the list_lock will synchronize with
2220 * other processors updating the list of slabs.
2221 */
Christoph Lameter80f08c12011-06-01 12:25:55 -05002222 spin_lock_irqsave(&n->list_lock, flags);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002223 }
2224 inuse = new.inuse;
Christoph Lameter81819f02007-05-06 14:49:36 -07002225
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002226 } while (!cmpxchg_double_slab(s, page,
2227 prior, counters,
2228 object, new.counters,
2229 "__slab_free"));
Christoph Lameter81819f02007-05-06 14:49:36 -07002230
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002231 if (likely(!n)) {
2232 /*
2233 * The list lock was not taken therefore no list
2234 * activity can be necessary.
2235 */
2236 if (was_frozen)
2237 stat(s, FREE_FROZEN);
Christoph Lameter80f08c12011-06-01 12:25:55 -05002238 return;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002239 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002240
2241 /*
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002242 * was_frozen may have been set after we acquired the list_lock in
2243 * an earlier loop. So we need to check it here again.
Christoph Lameter81819f02007-05-06 14:49:36 -07002244 */
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002245 if (was_frozen)
2246 stat(s, FREE_FROZEN);
2247 else {
2248 if (unlikely(!inuse && n->nr_partial > s->min_partial))
2249 goto slab_empty;
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05002250
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002251 /*
2252 * Objects left in the slab. If it was not on the partial list before
2253 * then add it.
2254 */
2255 if (unlikely(!prior)) {
2256 remove_full(s, page);
2257 add_partial(n, page, 0);
2258 stat(s, FREE_ADD_PARTIAL);
2259 }
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08002260 }
Christoph Lameter80f08c12011-06-01 12:25:55 -05002261 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002262 return;
2263
2264slab_empty:
Christoph Lametera973e9d2008-03-01 13:40:44 -08002265 if (prior) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002266 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002267 * Slab still on the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07002268 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05002269 remove_partial(n, page);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002270 stat(s, FREE_REMOVE_PARTIAL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08002271 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002272
Christoph Lameter80f08c12011-06-01 12:25:55 -05002273 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002274 stat(s, FREE_SLAB);
Christoph Lameter81819f02007-05-06 14:49:36 -07002275 discard_slab(s, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07002276}
2277
Christoph Lameter894b8782007-05-10 03:15:16 -07002278/*
2279 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2280 * can perform fastpath freeing without additional function calls.
2281 *
2282 * The fastpath is only possible if we are freeing to the current cpu slab
2283 * of this processor. This is typically the case if we have just allocated
2284 * the item before.
2285 *
2286 * If fastpath is not possible then fall back to __slab_free where we deal
2287 * with all sorts of special processing.
2288 */
Pekka Enberg06428782008-01-07 23:20:27 -08002289static __always_inline void slab_free(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002290 struct page *page, void *x, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07002291{
2292 void **object = (void *)x;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002293 struct kmem_cache_cpu *c;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002294 unsigned long tid;
Christoph Lameter1f842602008-01-07 23:20:30 -08002295
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002296 slab_free_hook(s, x);
2297
Christoph Lametera24c5a02011-03-15 12:45:21 -05002298redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002299
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002300 /*
2301	 * Determine the current cpu's per cpu slab.
2302 * The cpu may change afterward. However that does not matter since
2303 * data is retrieved via this pointer. If we are on the same cpu
2304	 * during the cmpxchg then the free will succeed.
2305 */
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002306 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002307
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002308 tid = c->tid;
2309 barrier();
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002310
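	/*
	 * If the page is still this cpu's slab, push the object onto the
	 * lockless freelist with a tid-checked cmpxchg; otherwise fall back
	 * to __slab_free().
	 */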
Christoph Lameter442b06b2011-05-17 16:29:31 -05002311 if (likely(page == c->page)) {
Christoph Lameterff120592009-12-18 16:26:22 -06002312 set_freepointer(s, object, c->freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002313
Thomas Gleixner30106b82011-05-04 15:38:19 +02002314 if (unlikely(!irqsafe_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002315 s->cpu_slab->freelist, s->cpu_slab->tid,
2316 c->freelist, tid,
2317 object, next_tid(tid)))) {
2318
2319 note_cmpxchg_failure("slab_free", s, tid);
2320 goto redo;
2321 }
Christoph Lameter84e554e62009-12-18 16:26:23 -06002322 stat(s, FREE_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07002323 } else
Christoph Lameterff120592009-12-18 16:26:22 -06002324 __slab_free(s, page, x, addr);
Christoph Lameter894b8782007-05-10 03:15:16 -07002325
Christoph Lameter894b8782007-05-10 03:15:16 -07002326}
2327
Christoph Lameter81819f02007-05-06 14:49:36 -07002328void kmem_cache_free(struct kmem_cache *s, void *x)
2329{
Christoph Lameter77c5e2d2007-05-06 14:49:42 -07002330 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07002331
Christoph Lameterb49af682007-05-06 14:49:41 -07002332 page = virt_to_head_page(x);
Christoph Lameter81819f02007-05-06 14:49:36 -07002333
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002334 slab_free(s, page, x, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002335
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002336 trace_kmem_cache_free(_RET_IP_, x);
Christoph Lameter81819f02007-05-06 14:49:36 -07002337}
2338EXPORT_SYMBOL(kmem_cache_free);
2339
Christoph Lameter81819f02007-05-06 14:49:36 -07002340/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002341 * Object placement in a slab is made very easy because we always start at
2342 * offset 0. If we tune the size of the object to the alignment then we can
2343 * get the required alignment by putting one properly sized object after
2344 * another.
Christoph Lameter81819f02007-05-06 14:49:36 -07002345 *
2346 * Notice that the allocation order determines the sizes of the per cpu
2347 * caches. Each processor has always one slab available for allocations.
2348 * Increasing the allocation order reduces the number of times that slabs
Christoph Lameter672bba32007-05-09 02:32:39 -07002349 * must be moved on and off the partial lists and is therefore a factor in
Christoph Lameter81819f02007-05-06 14:49:36 -07002350 * locking overhead.
Christoph Lameter81819f02007-05-06 14:49:36 -07002351 */
2352
2353/*
2354 * Minimum / Maximum order of slab pages. This influences locking overhead
2355 * and slab fragmentation. A higher order reduces the number of partial slabs
2356 * and increases the number of allocations possible without having to
2357 * take the list_lock.
2358 */
2359static int slub_min_order;
Christoph Lameter114e9e82008-04-14 19:11:41 +03002360static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03002361static int slub_min_objects;
Christoph Lameter81819f02007-05-06 14:49:36 -07002362
2363/*
2364 * Merge control. If this is set then no merging of slab caches will occur.
Christoph Lameter672bba32007-05-09 02:32:39 -07002365 * (Could be removed. This was introduced to pacify the merge skeptics.)
Christoph Lameter81819f02007-05-06 14:49:36 -07002366 */
2367static int slub_nomerge;
2368
2369/*
Christoph Lameter81819f02007-05-06 14:49:36 -07002370 * Calculate the order of allocation given a slab object size.
2371 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002372 * The order of allocation has significant impact on performance and other
2373 * system components. Generally order 0 allocations should be preferred since
2374 * order 0 does not cause fragmentation in the page allocator. Larger objects
2375 * may be problematic to put into order 0 slabs because there may be too much
Christoph Lameterc124f5b2008-04-14 19:13:29 +03002376 * unused space left. We go to a higher order if more than 1/16th of the slab
Christoph Lameter672bba32007-05-09 02:32:39 -07002377 * would be wasted.
Christoph Lameter81819f02007-05-06 14:49:36 -07002378 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002379 * In order to reach satisfactory performance we must ensure that a minimum
2380 * number of objects is in one slab. Otherwise we may generate too much
2381 * activity on the partial lists which requires taking the list_lock. This is
2382 * less a concern for large slabs though which are rarely used.
Christoph Lameter81819f02007-05-06 14:49:36 -07002383 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002384 * slub_max_order specifies the order where we begin to stop considering the
2385 * number of objects in a slab as critical. If we reach slub_max_order then
2386 * we try to keep the page order as low as possible. So we accept more waste
2387 * of space in favor of a small page order.
2388 *
2389 * Higher order allocations also allow the placement of more objects in a
2390 * slab and thereby reduce object handling overhead. If the user has
2391 * requested a higher minimum order then we start with that one instead of
2392 * the smallest order which will fit the object.
Christoph Lameter81819f02007-05-06 14:49:36 -07002393 */
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002394static inline int slab_order(int size, int min_objects,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002395 int max_order, int fract_leftover, int reserved)
Christoph Lameter81819f02007-05-06 14:49:36 -07002396{
2397 int order;
2398 int rem;
Christoph Lameter6300ea72007-07-17 04:03:20 -07002399 int min_order = slub_min_order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002400
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002401 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +04002402 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
Christoph Lameter39b26462008-04-14 19:11:30 +03002403
Christoph Lameter6300ea72007-07-17 04:03:20 -07002404 for (order = max(min_order,
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002405 fls(min_objects * size - 1) - PAGE_SHIFT);
2406 order <= max_order; order++) {
2407
Christoph Lameter81819f02007-05-06 14:49:36 -07002408 unsigned long slab_size = PAGE_SIZE << order;
2409
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002410 if (slab_size < min_objects * size + reserved)
Christoph Lameter81819f02007-05-06 14:49:36 -07002411 continue;
2412
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002413 rem = (slab_size - reserved) % size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002414
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002415 if (rem <= slab_size / fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07002416 break;
2417
2418 }
Christoph Lameter672bba32007-05-09 02:32:39 -07002419
Christoph Lameter81819f02007-05-06 14:49:36 -07002420 return order;
2421}
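/*
 * Worked example with assumed numbers (4 KiB pages): size = 192,
 * reserved = 0, min_objects = 16, fract_leftover = 16. The search starts
 * at order 0: 16 * 192 = 3072 bytes fit into the 4096 byte slab and the
 * leftover 4096 % 192 = 64 is within 4096 / 16 = 256, so order 0 is chosen.
 */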
2422
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002423static inline int calculate_order(int size, int reserved)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002424{
2425 int order;
2426 int min_objects;
2427 int fraction;
Zhang Yanmine8120ff2009-02-12 18:00:17 +02002428 int max_objects;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002429
2430 /*
2431 * Attempt to find best configuration for a slab. This
2432 * works by first attempting to generate a layout with
2433 * the best configuration and backing off gradually.
2434 *
2435 * First we reduce the acceptable waste in a slab. Then
2436 * we reduce the minimum objects required in a slab.
2437 */
2438 min_objects = slub_min_objects;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03002439 if (!min_objects)
2440 min_objects = 4 * (fls(nr_cpu_ids) + 1);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002441 max_objects = order_objects(slub_max_order, size, reserved);
Zhang Yanmine8120ff2009-02-12 18:00:17 +02002442 min_objects = min(min_objects, max_objects);
2443
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002444 while (min_objects > 1) {
Christoph Lameterc124f5b2008-04-14 19:13:29 +03002445 fraction = 16;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002446 while (fraction >= 4) {
2447 order = slab_order(size, min_objects,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002448 slub_max_order, fraction, reserved);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002449 if (order <= slub_max_order)
2450 return order;
2451 fraction /= 2;
2452 }
Amerigo Wang5086c389c2009-08-19 21:44:13 +03002453 min_objects--;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002454 }
2455
2456 /*
2457 * We were unable to place multiple objects in a slab. Now
2458	 * let's see if we can place a single object there.
2459 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002460 order = slab_order(size, 1, slub_max_order, 1, reserved);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002461 if (order <= slub_max_order)
2462 return order;
2463
2464 /*
2465 * Doh this slab cannot be placed using slub_max_order.
2466 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002467 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
David Rientjes818cf592009-04-23 09:58:22 +03002468 if (order < MAX_ORDER)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002469 return order;
2470 return -ENOSYS;
2471}
2472
Christoph Lameter81819f02007-05-06 14:49:36 -07002473/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002474 * Figure out what the alignment of the objects will be.
Christoph Lameter81819f02007-05-06 14:49:36 -07002475 */
2476static unsigned long calculate_alignment(unsigned long flags,
2477 unsigned long align, unsigned long size)
2478{
2479 /*
Christoph Lameter6446faa2008-02-15 23:45:26 -08002480 * If the user wants hardware cache aligned objects then follow that
2481 * suggestion if the object is sufficiently large.
Christoph Lameter81819f02007-05-06 14:49:36 -07002482 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08002483 * The hardware cache alignment cannot override the specified
2484 * alignment though. If that is greater, then use it.
Christoph Lameter81819f02007-05-06 14:49:36 -07002485 */
Nick Pigginb6210382008-03-05 14:05:56 -08002486 if (flags & SLAB_HWCACHE_ALIGN) {
2487 unsigned long ralign = cache_line_size();
2488 while (size <= ralign / 2)
2489 ralign /= 2;
2490 align = max(align, ralign);
2491 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002492
2493 if (align < ARCH_SLAB_MINALIGN)
Nick Pigginb6210382008-03-05 14:05:56 -08002494 align = ARCH_SLAB_MINALIGN;
Christoph Lameter81819f02007-05-06 14:49:36 -07002495
2496 return ALIGN(align, sizeof(void *));
2497}
2498
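/*
 * Example of the cache line heuristic above (illustrative; assumes 64 byte
 * cache lines and no explicit align request): a 20 byte object halves
 * ralign from 64 to 32 and then stops because 20 > 16, so it gets 32 byte
 * alignment and two objects can share a line; a 40 byte object is already
 * larger than half a line, so it keeps the full 64 byte alignment.
 */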
Pekka Enberg5595cff2008-08-05 09:28:47 +03002499static void
2500init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002501{
2502 n->nr_partial = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07002503 spin_lock_init(&n->list_lock);
2504 INIT_LIST_HEAD(&n->partial);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002505#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter0f389ec2008-04-14 18:53:02 +03002506 atomic_long_set(&n->nr_slabs, 0);
Salman Qazi02b71b72008-09-11 12:25:41 -07002507 atomic_long_set(&n->total_objects, 0);
Christoph Lameter643b1132007-05-06 14:49:42 -07002508 INIT_LIST_HEAD(&n->full);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002509#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002510}
2511
Christoph Lameter55136592010-08-20 12:37:13 -05002512static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002513{
Christoph Lameter6c182dc2010-08-20 12:37:14 -05002514 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2515 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002516
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002517 /*
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04002518 * Must align to double word boundary for the double cmpxchg
2519 * instructions to work; see __pcpu_double_call_return_bool().
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002520 */
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04002521 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2522 2 * sizeof(void *));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002523
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002524 if (!s->cpu_slab)
2525 return 0;
2526
2527 init_kmem_cache_cpus(s);
2528
2529 return 1;
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002530}
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002531
Christoph Lameter51df1142010-08-20 12:37:15 -05002532static struct kmem_cache *kmem_cache_node;
2533
Christoph Lameter81819f02007-05-06 14:49:36 -07002534/*
2535 * No kmalloc_node yet so do it by hand. We know that this is the first
2536 * slab on the node for this slabcache. There are no concurrent accesses
2537 * possible.
2538 *
2539 * Note that this function only works on the kmem_cache_node cache
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002540 * when allocating for that same cache. This is used for bootstrapping
2541 * memory on a fresh node that has no slab structures yet.
Christoph Lameter81819f02007-05-06 14:49:36 -07002542 */
Christoph Lameter55136592010-08-20 12:37:13 -05002543static void early_kmem_cache_node_alloc(int node)
Christoph Lameter81819f02007-05-06 14:49:36 -07002544{
2545 struct page *page;
2546 struct kmem_cache_node *n;
2547
Christoph Lameter51df1142010-08-20 12:37:15 -05002548 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
Christoph Lameter81819f02007-05-06 14:49:36 -07002549
Christoph Lameter51df1142010-08-20 12:37:15 -05002550 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002551
2552 BUG_ON(!page);
Christoph Lametera2f92ee2007-08-22 14:01:57 -07002553 if (page_to_nid(page) != node) {
2554 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2555 "node %d\n", node);
2556 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2557 "in order to be able to continue\n");
2558 }
2559
Christoph Lameter81819f02007-05-06 14:49:36 -07002560 n = page->freelist;
2561 BUG_ON(!n);
Christoph Lameter51df1142010-08-20 12:37:15 -05002562 page->freelist = get_freepointer(kmem_cache_node, n);
Christoph Lameter81819f02007-05-06 14:49:36 -07002563 page->inuse++;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05002564 page->frozen = 0;
Christoph Lameter51df1142010-08-20 12:37:15 -05002565 kmem_cache_node->node[node] = n;
Christoph Lameter8ab13722007-07-17 04:03:32 -07002566#ifdef CONFIG_SLUB_DEBUG
Christoph Lameterf7cb1932010-09-29 07:15:01 -05002567 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
Christoph Lameter51df1142010-08-20 12:37:15 -05002568 init_tracking(kmem_cache_node, n);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002569#endif
Christoph Lameter51df1142010-08-20 12:37:15 -05002570 init_kmem_cache_node(n, kmem_cache_node);
2571 inc_slabs_node(kmem_cache_node, node, page->objects);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002572
Christoph Lameter7c2e1322008-01-07 23:20:27 -08002573 add_partial(n, page, 0);
Christoph Lameter81819f02007-05-06 14:49:36 -07002574}
2575
2576static void free_kmem_cache_nodes(struct kmem_cache *s)
2577{
2578 int node;
2579
Christoph Lameterf64dc582007-10-16 01:25:33 -07002580 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002581 struct kmem_cache_node *n = s->node[node];
Christoph Lameter51df1142010-08-20 12:37:15 -05002582
Alexander Duyck73367bd2010-05-21 14:41:35 -07002583 if (n)
Christoph Lameter51df1142010-08-20 12:37:15 -05002584 kmem_cache_free(kmem_cache_node, n);
2585
Christoph Lameter81819f02007-05-06 14:49:36 -07002586 s->node[node] = NULL;
2587 }
2588}
2589
Christoph Lameter55136592010-08-20 12:37:13 -05002590static int init_kmem_cache_nodes(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002591{
2592 int node;
Christoph Lameter81819f02007-05-06 14:49:36 -07002593
Christoph Lameterf64dc582007-10-16 01:25:33 -07002594 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002595 struct kmem_cache_node *n;
2596
Alexander Duyck73367bd2010-05-21 14:41:35 -07002597 if (slab_state == DOWN) {
Christoph Lameter55136592010-08-20 12:37:13 -05002598 early_kmem_cache_node_alloc(node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07002599 continue;
Christoph Lameter81819f02007-05-06 14:49:36 -07002600 }
Christoph Lameter51df1142010-08-20 12:37:15 -05002601 n = kmem_cache_alloc_node(kmem_cache_node,
Christoph Lameter55136592010-08-20 12:37:13 -05002602 GFP_KERNEL, node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07002603
2604 if (!n) {
2605 free_kmem_cache_nodes(s);
2606 return 0;
2607 }
2608
Christoph Lameter81819f02007-05-06 14:49:36 -07002609 s->node[node] = n;
Pekka Enberg5595cff2008-08-05 09:28:47 +03002610 init_kmem_cache_node(n, s);
Christoph Lameter81819f02007-05-06 14:49:36 -07002611 }
2612 return 1;
2613}
Christoph Lameter81819f02007-05-06 14:49:36 -07002614
David Rientjesc0bdb232009-02-25 09:16:35 +02002615static void set_min_partial(struct kmem_cache *s, unsigned long min)
David Rientjes3b89d7d2009-02-22 17:40:07 -08002616{
2617 if (min < MIN_PARTIAL)
2618 min = MIN_PARTIAL;
2619 else if (min > MAX_PARTIAL)
2620 min = MAX_PARTIAL;
2621 s->min_partial = min;
2622}
2623
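/*
 * Example (illustrative; assumes MIN_PARTIAL is 5 and MAX_PARTIAL is 10 as
 * defined earlier in this file): kmem_cache_open() below passes
 * ilog2(s->size), so a 32 byte cache asks for 5 and keeps it, while a 4KiB
 * cache asks for 12 and is clamped down to 10 partial slabs per node.
 */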
Christoph Lameter81819f02007-05-06 14:49:36 -07002624/*
2625 * calculate_sizes() determines the order and the distribution of data within
2626 * a slab object.
2627 */
Christoph Lameter06b285d2008-04-14 19:11:41 +03002628static int calculate_sizes(struct kmem_cache *s, int forced_order)
Christoph Lameter81819f02007-05-06 14:49:36 -07002629{
2630 unsigned long flags = s->flags;
2631 unsigned long size = s->objsize;
2632 unsigned long align = s->align;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002633 int order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002634
2635 /*
Christoph Lameterd8b42bf2008-02-15 23:45:25 -08002636 * Round up object size to the next word boundary. We can only
2637 * place the free pointer at word boundaries and this determines
2638 * the possible location of the free pointer.
2639 */
2640 size = ALIGN(size, sizeof(void *));
2641
2642#ifdef CONFIG_SLUB_DEBUG
2643 /*
Christoph Lameter81819f02007-05-06 14:49:36 -07002644 * Determine if we can poison the object itself. If the user of
2645 * the slab may touch the object after free or before allocation
2646 * then we should never poison the object itself.
2647 */
2648 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
Christoph Lameterc59def92007-05-16 22:10:50 -07002649 !s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07002650 s->flags |= __OBJECT_POISON;
2651 else
2652 s->flags &= ~__OBJECT_POISON;
2653
Christoph Lameter81819f02007-05-06 14:49:36 -07002654
2655 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002656 * If we are Redzoning then check if there is some space between the
Christoph Lameter81819f02007-05-06 14:49:36 -07002657 * end of the object and the free pointer. If not then add an
Christoph Lameter672bba32007-05-09 02:32:39 -07002658 * additional word to have some bytes to store Redzone information.
Christoph Lameter81819f02007-05-06 14:49:36 -07002659 */
2660 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2661 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002662#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002663
2664 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002665 * With that we have determined the number of bytes in actual use
2666 * by the object. This is the potential offset to the free pointer.
Christoph Lameter81819f02007-05-06 14:49:36 -07002667 */
2668 s->inuse = size;
2669
2670 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
Christoph Lameterc59def92007-05-16 22:10:50 -07002671 s->ctor)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002672 /*
2673 * Relocate free pointer after the object if it is not
2674 * permitted to overwrite the first word of the object on
2675 * kmem_cache_free.
2676 *
2677 * This is the case if we do RCU, have a constructor or are
2678 * poisoning the objects.
2679 */
2680 s->offset = size;
2681 size += sizeof(void *);
2682 }
2683
Christoph Lameterc12b3c62007-05-23 13:57:31 -07002684#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07002685 if (flags & SLAB_STORE_USER)
2686 /*
2687 * Need to store information about allocs and frees after
2688 * the object.
2689 */
2690 size += 2 * sizeof(struct track);
2691
Christoph Lameterbe7b3fb2007-05-09 02:32:36 -07002692 if (flags & SLAB_RED_ZONE)
Christoph Lameter81819f02007-05-06 14:49:36 -07002693 /*
2694 * Add some empty padding so that we can catch
2695 * overwrites from earlier objects rather than let
2696 * tracking information or the free pointer be
Frederik Schwarzer0211a9c2008-12-29 22:14:56 +01002697 * corrupted if a user writes before the start
Christoph Lameter81819f02007-05-06 14:49:36 -07002698 * of the object.
2699 */
2700 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002701#endif
Christoph Lameter672bba32007-05-09 02:32:39 -07002702
Christoph Lameter81819f02007-05-06 14:49:36 -07002703 /*
2704 * Determine the alignment based on various parameters that the
Christoph Lameter65c02d42007-05-09 02:32:35 -07002705 * user specified and the dynamic determination of cache line size
2706 * on bootup.
Christoph Lameter81819f02007-05-06 14:49:36 -07002707 */
2708 align = calculate_alignment(flags, align, s->objsize);
Zhang, Yanmindcb0ce12009-07-30 11:28:11 +08002709 s->align = align;
Christoph Lameter81819f02007-05-06 14:49:36 -07002710
2711 /*
2712 * SLUB stores one object immediately after another beginning from
2713 * offset 0. In order to align the objects we have to simply size
2714 * each object to conform to the alignment.
2715 */
2716 size = ALIGN(size, align);
2717 s->size = size;
Christoph Lameter06b285d2008-04-14 19:11:41 +03002718 if (forced_order >= 0)
2719 order = forced_order;
2720 else
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002721 order = calculate_order(size, s->reserved);
Christoph Lameter81819f02007-05-06 14:49:36 -07002722
Christoph Lameter834f3d12008-04-14 19:11:31 +03002723 if (order < 0)
Christoph Lameter81819f02007-05-06 14:49:36 -07002724 return 0;
2725
Christoph Lameterb7a49f02008-02-14 14:21:32 -08002726 s->allocflags = 0;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002727 if (order)
Christoph Lameterb7a49f02008-02-14 14:21:32 -08002728 s->allocflags |= __GFP_COMP;
2729
2730 if (s->flags & SLAB_CACHE_DMA)
2731 s->allocflags |= SLUB_DMA;
2732
2733 if (s->flags & SLAB_RECLAIM_ACCOUNT)
2734 s->allocflags |= __GFP_RECLAIMABLE;
2735
Christoph Lameter81819f02007-05-06 14:49:36 -07002736 /*
2737 * Determine the number of objects per slab
2738 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002739 s->oo = oo_make(order, size, s->reserved);
2740 s->min = oo_make(get_order(size), size, s->reserved);
Christoph Lameter205ab992008-04-14 19:11:40 +03002741 if (oo_objects(s->oo) > oo_objects(s->max))
2742 s->max = s->oo;
Christoph Lameter81819f02007-05-06 14:49:36 -07002743
Christoph Lameter834f3d12008-04-14 19:11:31 +03002744 return !!oo_objects(s->oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07002745
2746}
2747
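/*
 * Worked example of the layout computed above (illustrative; assumes a
 * 64 bit build, ARCH_SLAB_MINALIGN of 8, no constructor and only
 * SLAB_RED_ZONE set): a 24 byte object stays 24 after word alignment,
 * gains a trailing red zone word (32), keeps the free pointer overlaid on
 * its first word since nothing forbids overwriting a free object, and
 * finally gains one word of padding (40). With 8 byte alignment s->size
 * therefore ends up as 40 bytes per object.
 */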
Christoph Lameter55136592010-08-20 12:37:13 -05002748static int kmem_cache_open(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07002749 const char *name, size_t size,
2750 size_t align, unsigned long flags,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002751 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07002752{
2753 memset(s, 0, kmem_size);
2754 s->name = name;
2755 s->ctor = ctor;
Christoph Lameter81819f02007-05-06 14:49:36 -07002756 s->objsize = size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002757 s->align = align;
Christoph Lameterba0268a2007-09-11 15:24:11 -07002758 s->flags = kmem_cache_flags(size, flags, name, ctor);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002759 s->reserved = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07002760
Lai Jiangshanda9a6382011-03-10 15:22:00 +08002761 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
2762 s->reserved = sizeof(struct rcu_head);
Christoph Lameter81819f02007-05-06 14:49:36 -07002763
Christoph Lameter06b285d2008-04-14 19:11:41 +03002764 if (!calculate_sizes(s, -1))
Christoph Lameter81819f02007-05-06 14:49:36 -07002765 goto error;
David Rientjes3de47212009-07-27 18:30:35 -07002766 if (disable_higher_order_debug) {
2767 /*
2768 * Disable debugging flags that store metadata if the min slab
2769 * order increased.
2770 */
2771 if (get_order(s->size) > get_order(s->objsize)) {
2772 s->flags &= ~DEBUG_METADATA_FLAGS;
2773 s->offset = 0;
2774 if (!calculate_sizes(s, -1))
2775 goto error;
2776 }
2777 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002778
Christoph Lameterb789ef52011-06-01 12:25:49 -05002779#ifdef CONFIG_CMPXCHG_DOUBLE
2780 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
2781 /* Enable fast mode */
2782 s->flags |= __CMPXCHG_DOUBLE;
2783#endif
2784
David Rientjes3b89d7d2009-02-22 17:40:07 -08002785 /*
2786 * The larger the object size is, the more pages we want on the partial
2787 * list to avoid pounding the page allocator excessively.
2788 */
David Rientjesc0bdb232009-02-25 09:16:35 +02002789 set_min_partial(s, ilog2(s->size));
Christoph Lameter81819f02007-05-06 14:49:36 -07002790 s->refcount = 1;
2791#ifdef CONFIG_NUMA
Christoph Lametere2cb96b2008-08-19 08:51:22 -05002792 s->remote_node_defrag_ratio = 1000;
Christoph Lameter81819f02007-05-06 14:49:36 -07002793#endif
Christoph Lameter55136592010-08-20 12:37:13 -05002794 if (!init_kmem_cache_nodes(s))
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002795 goto error;
Christoph Lameter81819f02007-05-06 14:49:36 -07002796
Christoph Lameter55136592010-08-20 12:37:13 -05002797 if (alloc_kmem_cache_cpus(s))
Christoph Lameter81819f02007-05-06 14:49:36 -07002798 return 1;
Christoph Lameterff120592009-12-18 16:26:22 -06002799
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002800 free_kmem_cache_nodes(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07002801error:
2802 if (flags & SLAB_PANIC)
2803 panic("Cannot create slab %s size=%lu realsize=%u "
2804 "order=%u offset=%u flags=%lx\n",
Christoph Lameter834f3d12008-04-14 19:11:31 +03002805 s->name, (unsigned long)size, s->size, oo_order(s->oo),
Christoph Lameter81819f02007-05-06 14:49:36 -07002806 s->offset, flags);
2807 return 0;
2808}
Christoph Lameter81819f02007-05-06 14:49:36 -07002809
2810/*
Christoph Lameter81819f02007-05-06 14:49:36 -07002811 * Determine the size of a slab object
2812 */
2813unsigned int kmem_cache_size(struct kmem_cache *s)
2814{
2815 return s->objsize;
2816}
2817EXPORT_SYMBOL(kmem_cache_size);
2818
Christoph Lameter33b12c32008-04-25 12:22:43 -07002819static void list_slab_objects(struct kmem_cache *s, struct page *page,
2820 const char *text)
Christoph Lameter81819f02007-05-06 14:49:36 -07002821{
Christoph Lameter33b12c32008-04-25 12:22:43 -07002822#ifdef CONFIG_SLUB_DEBUG
2823 void *addr = page_address(page);
2824 void *p;
Namhyung Kima5dd5c12010-09-29 21:02:13 +09002825 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
2826 sizeof(long), GFP_ATOMIC);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01002827 if (!map)
2828 return;
Christoph Lameter33b12c32008-04-25 12:22:43 -07002829 slab_err(s, page, "%s", text);
2830 slab_lock(page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07002831
Christoph Lameter5f80b132011-04-15 14:48:13 -05002832 get_map(s, page, map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07002833 for_each_object(p, s, addr, page->objects) {
2834
2835 if (!test_bit(slab_index(p, s, addr), map)) {
2836 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2837 p, p - addr);
2838 print_tracking(s, p);
2839 }
2840 }
2841 slab_unlock(page);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01002842 kfree(map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07002843#endif
2844}
2845
Christoph Lameter81819f02007-05-06 14:49:36 -07002846/*
Christoph Lameter599870b2008-04-23 12:36:52 -07002847 * Attempt to free all partial slabs on a node.
Christoph Lameter81819f02007-05-06 14:49:36 -07002848 */
Christoph Lameter599870b2008-04-23 12:36:52 -07002849static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07002850{
Christoph Lameter81819f02007-05-06 14:49:36 -07002851 unsigned long flags;
2852 struct page *page, *h;
2853
2854 spin_lock_irqsave(&n->list_lock, flags);
Christoph Lameter33b12c32008-04-25 12:22:43 -07002855 list_for_each_entry_safe(page, h, &n->partial, lru) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002856 if (!page->inuse) {
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05002857 remove_partial(n, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07002858 discard_slab(s, page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07002859 } else {
2860 list_slab_objects(s, page,
2861 "Objects remaining on kmem_cache_close()");
Christoph Lameter599870b2008-04-23 12:36:52 -07002862 }
Christoph Lameter33b12c32008-04-25 12:22:43 -07002863 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002864 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002865}
2866
2867/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002868 * Release all resources used by a slab cache.
Christoph Lameter81819f02007-05-06 14:49:36 -07002869 */
Christoph Lameter0c710012007-07-17 04:03:24 -07002870static inline int kmem_cache_close(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002871{
2872 int node;
2873
2874 flush_all(s);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002875 free_percpu(s->cpu_slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07002876 /* Attempt to free all objects */
Christoph Lameterf64dc582007-10-16 01:25:33 -07002877 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002878 struct kmem_cache_node *n = get_node(s, node);
2879
Christoph Lameter599870b2008-04-23 12:36:52 -07002880 free_partial(s, n);
2881 if (n->nr_partial || slabs_node(s, node))
Christoph Lameter81819f02007-05-06 14:49:36 -07002882 return 1;
2883 }
2884 free_kmem_cache_nodes(s);
2885 return 0;
2886}
2887
2888/*
2889 * Close a cache and release the kmem_cache structure
2890 * (must be used for caches created using kmem_cache_create)
2891 */
2892void kmem_cache_destroy(struct kmem_cache *s)
2893{
2894 down_write(&slub_lock);
2895 s->refcount--;
2896 if (!s->refcount) {
2897 list_del(&s->list);
Pekka Enbergd629d812008-04-23 22:31:08 +03002898 if (kmem_cache_close(s)) {
2899 printk(KERN_ERR "SLUB %s: %s called for cache that "
2900 "still has objects.\n", s->name, __func__);
2901 dump_stack();
2902 }
Eric Dumazetd76b1592009-09-03 22:38:59 +03002903 if (s->flags & SLAB_DESTROY_BY_RCU)
2904 rcu_barrier();
Christoph Lameter81819f02007-05-06 14:49:36 -07002905 sysfs_slab_remove(s);
Christoph Lameter2bce6482010-07-19 11:39:11 -05002906 }
2907 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07002908}
2909EXPORT_SYMBOL(kmem_cache_destroy);
2910
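/*
 * Minimal usage sketch for the cache lifecycle implemented above
 * (illustrative only, not part of SLUB; all names are made up):
 */
#if 0
static struct kmem_cache *ex_cachep;

static int __init ex_module_init(void)
{
	/* A cache of 128 byte objects, hardware cache aligned, no ctor. */
	ex_cachep = kmem_cache_create("ex_cache", 128, 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!ex_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit ex_module_exit(void)
{
	/* Every object must have been freed before destroying the cache. */
	kmem_cache_destroy(ex_cachep);
}
#endif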
2911/********************************************************************
2912 * Kmalloc subsystem
2913 *******************************************************************/
2914
Christoph Lameter51df1142010-08-20 12:37:15 -05002915struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
Christoph Lameter81819f02007-05-06 14:49:36 -07002916EXPORT_SYMBOL(kmalloc_caches);
2917
Christoph Lameter51df1142010-08-20 12:37:15 -05002918static struct kmem_cache *kmem_cache;
2919
Christoph Lameter55136592010-08-20 12:37:13 -05002920#ifdef CONFIG_ZONE_DMA
Christoph Lameter51df1142010-08-20 12:37:15 -05002921static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
Christoph Lameter55136592010-08-20 12:37:13 -05002922#endif
2923
Christoph Lameter81819f02007-05-06 14:49:36 -07002924static int __init setup_slub_min_order(char *str)
2925{
Pekka Enberg06428782008-01-07 23:20:27 -08002926 get_option(&str, &slub_min_order);
Christoph Lameter81819f02007-05-06 14:49:36 -07002927
2928 return 1;
2929}
2930
2931__setup("slub_min_order=", setup_slub_min_order);
2932
2933static int __init setup_slub_max_order(char *str)
2934{
Pekka Enberg06428782008-01-07 23:20:27 -08002935 get_option(&str, &slub_max_order);
David Rientjes818cf592009-04-23 09:58:22 +03002936 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07002937
2938 return 1;
2939}
2940
2941__setup("slub_max_order=", setup_slub_max_order);
2942
2943static int __init setup_slub_min_objects(char *str)
2944{
Pekka Enberg06428782008-01-07 23:20:27 -08002945 get_option(&str, &slub_min_objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07002946
2947 return 1;
2948}
2949
2950__setup("slub_min_objects=", setup_slub_min_objects);
2951
2952static int __init setup_slub_nomerge(char *str)
2953{
2954 slub_nomerge = 1;
2955 return 1;
2956}
2957
2958__setup("slub_nomerge", setup_slub_nomerge);
2959
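/*
 * Example boot parameters handled above (illustrative): "slub_max_order=1"
 * caps slabs at order 1, "slub_min_objects=16" asks for at least 16 objects
 * per slab where the order cap allows it, and "slub_nomerge" keeps every
 * cache separate instead of aliasing compatible ones.
 */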
Christoph Lameter51df1142010-08-20 12:37:15 -05002960static struct kmem_cache *__init create_kmalloc_cache(const char *name,
2961 int size, unsigned int flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07002962{
Christoph Lameter51df1142010-08-20 12:37:15 -05002963 struct kmem_cache *s;
2964
2965 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
2966
Pekka Enberg83b519e2009-06-10 19:40:04 +03002967 /*
2968 * This function is called with IRQs disabled during early boot on a
2969 * single CPU, so there is no need to take slub_lock here.
2970 */
Christoph Lameter55136592010-08-20 12:37:13 -05002971 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
Christoph Lameter319d1e22008-04-14 19:11:41 +03002972 flags, NULL))
Christoph Lameter81819f02007-05-06 14:49:36 -07002973 goto panic;
2974
2975 list_add(&s->list, &slab_caches);
Christoph Lameter51df1142010-08-20 12:37:15 -05002976 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07002977
2978panic:
2979 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
Christoph Lameter51df1142010-08-20 12:37:15 -05002980 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07002981}
2982
Christoph Lameterf1b26332007-07-17 04:03:26 -07002983/*
2984 * Conversion table from small slab sizes (divided by 8) to the index in
2985 * the kmalloc array. This is necessary for slabs < 192 since we have
2986 * non-power-of-two cache sizes there. The size of larger slabs can be
2987 * determined using fls.
2988 */
2989static s8 size_index[24] = {
2990 3, /* 8 */
2991 4, /* 16 */
2992 5, /* 24 */
2993 5, /* 32 */
2994 6, /* 40 */
2995 6, /* 48 */
2996 6, /* 56 */
2997 6, /* 64 */
2998 1, /* 72 */
2999 1, /* 80 */
3000 1, /* 88 */
3001 1, /* 96 */
3002 7, /* 104 */
3003 7, /* 112 */
3004 7, /* 120 */
3005 7, /* 128 */
3006 2, /* 136 */
3007 2, /* 144 */
3008 2, /* 152 */
3009 2, /* 160 */
3010 2, /* 168 */
3011 2, /* 176 */
3012 2, /* 184 */
3013 2 /* 192 */
3014};
3015
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003016static inline int size_index_elem(size_t bytes)
3017{
3018 return (bytes - 1) / 8;
3019}
3020
Christoph Lameter81819f02007-05-06 14:49:36 -07003021static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3022{
Christoph Lameterf1b26332007-07-17 04:03:26 -07003023 int index;
Christoph Lameter81819f02007-05-06 14:49:36 -07003024
Christoph Lameterf1b26332007-07-17 04:03:26 -07003025 if (size <= 192) {
3026 if (!size)
3027 return ZERO_SIZE_PTR;
Christoph Lameter81819f02007-05-06 14:49:36 -07003028
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003029 index = size_index[size_index_elem(size)];
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003030 } else
Christoph Lameterf1b26332007-07-17 04:03:26 -07003031 index = fls(size - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07003032
3033#ifdef CONFIG_ZONE_DMA
Christoph Lameterf1b26332007-07-17 04:03:26 -07003034 if (unlikely((flags & SLUB_DMA)))
Christoph Lameter51df1142010-08-20 12:37:15 -05003035 return kmalloc_dma_caches[index];
Christoph Lameterf1b26332007-07-17 04:03:26 -07003036
Christoph Lameter81819f02007-05-06 14:49:36 -07003037#endif
Christoph Lameter51df1142010-08-20 12:37:15 -05003038 return kmalloc_caches[index];
Christoph Lameter81819f02007-05-06 14:49:36 -07003039}
3040
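/*
 * Examples of the lookup above (illustrative, default caches, no DMA flag):
 * kmalloc(9) uses size_index[(9 - 1) / 8] == 4, i.e. kmalloc-16;
 * kmalloc(100) uses size_index[(100 - 1) / 8] == 7, i.e. kmalloc-128;
 * kmalloc(300) is above 192 so it uses fls(299) == 9, i.e. kmalloc-512.
 */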
3041void *__kmalloc(size_t size, gfp_t flags)
3042{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003043 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003044 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003045
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003046 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02003047 return kmalloc_large(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003048
3049 s = get_slab(size, flags);
3050
3051 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003052 return s;
3053
Christoph Lameter2154a332010-07-09 14:07:10 -05003054 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003055
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003056 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003057
3058 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003059}
3060EXPORT_SYMBOL(__kmalloc);
3061
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09003062#ifdef CONFIG_NUMA
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003063static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3064{
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003065 struct page *page;
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003066 void *ptr = NULL;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003067
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003068 flags |= __GFP_COMP | __GFP_NOTRACK;
3069 page = alloc_pages_node(node, flags, get_order(size));
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003070 if (page)
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003071 ptr = page_address(page);
3072
3073 kmemleak_alloc(ptr, size, 1, flags);
3074 return ptr;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003075}
3076
Christoph Lameter81819f02007-05-06 14:49:36 -07003077void *__kmalloc_node(size_t size, gfp_t flags, int node)
3078{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003079 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003080 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003081
Ingo Molnar057685c2009-02-20 12:15:30 +01003082 if (unlikely(size > SLUB_MAX_SIZE)) {
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003083 ret = kmalloc_large_node(size, flags, node);
3084
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003085 trace_kmalloc_node(_RET_IP_, ret,
3086 size, PAGE_SIZE << get_order(size),
3087 flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003088
3089 return ret;
3090 }
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003091
3092 s = get_slab(size, flags);
3093
3094 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003095 return s;
3096
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003097 ret = slab_alloc(s, flags, node, _RET_IP_);
3098
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003099 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003100
3101 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003102}
3103EXPORT_SYMBOL(__kmalloc_node);
3104#endif
3105
3106size_t ksize(const void *object)
3107{
Christoph Lameter272c1d22007-06-08 13:46:49 -07003108 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07003109
Christoph Lameteref8b4522007-10-16 01:24:46 -07003110 if (unlikely(object == ZERO_SIZE_PTR))
Christoph Lameter272c1d22007-06-08 13:46:49 -07003111 return 0;
3112
Vegard Nossum294a80a2007-12-04 23:45:30 -08003113 page = virt_to_head_page(object);
Vegard Nossum294a80a2007-12-04 23:45:30 -08003114
Pekka Enberg76994412008-05-22 19:22:25 +03003115 if (unlikely(!PageSlab(page))) {
3116 WARN_ON(!PageCompound(page));
Vegard Nossum294a80a2007-12-04 23:45:30 -08003117 return PAGE_SIZE << compound_order(page);
Pekka Enberg76994412008-05-22 19:22:25 +03003118 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003119
Eric Dumazetb3d41882011-02-14 18:35:22 +01003120 return slab_ksize(page->slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07003121}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02003122EXPORT_SYMBOL(ksize);
Christoph Lameter81819f02007-05-06 14:49:36 -07003123
3124void kfree(const void *x)
3125{
Christoph Lameter81819f02007-05-06 14:49:36 -07003126 struct page *page;
Christoph Lameter5bb983b2008-02-07 17:47:41 -08003127 void *object = (void *)x;
Christoph Lameter81819f02007-05-06 14:49:36 -07003128
Pekka Enberg2121db72009-03-25 11:05:57 +02003129 trace_kfree(_RET_IP_, x);
3130
Satyam Sharma2408c552007-10-16 01:24:44 -07003131 if (unlikely(ZERO_OR_NULL_PTR(x)))
Christoph Lameter81819f02007-05-06 14:49:36 -07003132 return;
3133
Christoph Lameterb49af682007-05-06 14:49:41 -07003134 page = virt_to_head_page(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003135 if (unlikely(!PageSlab(page))) {
Christoph Lameter09375022008-05-28 10:32:22 -07003136 BUG_ON(!PageCompound(page));
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003137 kmemleak_free(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003138 put_page(page);
3139 return;
3140 }
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003141 slab_free(page->slab, page, object, _RET_IP_);
Christoph Lameter81819f02007-05-06 14:49:36 -07003142}
3143EXPORT_SYMBOL(kfree);
3144
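/*
 * Minimal usage sketch for the kmalloc family above (illustrative only,
 * not part of SLUB; assumes the default kmalloc-128 cache serves a
 * 100 byte request):
 */
#if 0
static void ex_kmalloc_usage(void)
{
	void *buf = kmalloc(100, GFP_KERNEL);

	if (!buf)
		return;
	/* ksize() reports the usable size of the backing object, here 128. */
	pr_info("usable bytes: %zu\n", ksize(buf));
	kfree(buf);
}
#endif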
Christoph Lameter2086d262007-05-06 14:49:46 -07003145/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003146 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3147 * the remaining slabs by the number of items in use. The slabs with the
3148 * most items in use come first. New allocations will then fill those up
3149 * and thus they can be removed from the partial lists.
3150 *
3151 * The slabs with the least items are placed last. This results in them
3152 * being allocated from last, increasing the chance that their remaining
3153 * objects are freed and the slabs can then be discarded.
Christoph Lameter2086d262007-05-06 14:49:46 -07003154 */
3155int kmem_cache_shrink(struct kmem_cache *s)
3156{
3157 int node;
3158 int i;
3159 struct kmem_cache_node *n;
3160 struct page *page;
3161 struct page *t;
Christoph Lameter205ab992008-04-14 19:11:40 +03003162 int objects = oo_objects(s->max);
Christoph Lameter2086d262007-05-06 14:49:46 -07003163 struct list_head *slabs_by_inuse =
Christoph Lameter834f3d12008-04-14 19:11:31 +03003164 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
Christoph Lameter2086d262007-05-06 14:49:46 -07003165 unsigned long flags;
3166
3167 if (!slabs_by_inuse)
3168 return -ENOMEM;
3169
3170 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07003171 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter2086d262007-05-06 14:49:46 -07003172 n = get_node(s, node);
3173
3174 if (!n->nr_partial)
3175 continue;
3176
Christoph Lameter834f3d12008-04-14 19:11:31 +03003177 for (i = 0; i < objects; i++)
Christoph Lameter2086d262007-05-06 14:49:46 -07003178 INIT_LIST_HEAD(slabs_by_inuse + i);
3179
3180 spin_lock_irqsave(&n->list_lock, flags);
3181
3182 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003183 * Build lists indexed by the items in use in each slab.
Christoph Lameter2086d262007-05-06 14:49:46 -07003184 *
Christoph Lameter672bba32007-05-09 02:32:39 -07003185 * Note that concurrent frees may occur while we hold the
3186 * list_lock. page->inuse here is the upper limit.
Christoph Lameter2086d262007-05-06 14:49:46 -07003187 */
3188 list_for_each_entry_safe(page, t, &n->partial, lru) {
Christoph Lameter881db7f2011-06-01 12:25:53 -05003189 if (!page->inuse) {
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05003190 remove_partial(n, page);
Christoph Lameter2086d262007-05-06 14:49:46 -07003191 discard_slab(s, page);
3192 } else {
Christoph Lameterfcda3d82007-07-30 13:06:46 -07003193 list_move(&page->lru,
3194 slabs_by_inuse + page->inuse);
Christoph Lameter2086d262007-05-06 14:49:46 -07003195 }
3196 }
3197
Christoph Lameter2086d262007-05-06 14:49:46 -07003198 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003199 * Rebuild the partial list with the slabs filled up most
3200 * first and the least used slabs at the end.
Christoph Lameter2086d262007-05-06 14:49:46 -07003201 */
Christoph Lameter834f3d12008-04-14 19:11:31 +03003202 for (i = objects - 1; i >= 0; i--)
Christoph Lameter2086d262007-05-06 14:49:46 -07003203 list_splice(slabs_by_inuse + i, n->partial.prev);
3204
Christoph Lameter2086d262007-05-06 14:49:46 -07003205 spin_unlock_irqrestore(&n->list_lock, flags);
3206 }
3207
3208 kfree(slabs_by_inuse);
3209 return 0;
3210}
3211EXPORT_SYMBOL(kmem_cache_shrink);
3212
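/*
 * Example of the sort above (illustrative): on a node holding partial slabs
 * with 0, 3 and 20 objects in use (out of, say, 21 per slab), the empty
 * slab is freed, the 20-object slab ends up at the head of the partial list
 * and the 3-object slab at the tail, so new allocations fill the fuller
 * slab first and the nearly empty one gets a chance to drain completely.
 */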
Pekka Enberg92a5bbc2010-10-06 16:58:16 +03003213#if defined(CONFIG_MEMORY_HOTPLUG)
Yasunori Gotob9049e22007-10-21 16:41:37 -07003214static int slab_mem_going_offline_callback(void *arg)
3215{
3216 struct kmem_cache *s;
3217
3218 down_read(&slub_lock);
3219 list_for_each_entry(s, &slab_caches, list)
3220 kmem_cache_shrink(s);
3221 up_read(&slub_lock);
3222
3223 return 0;
3224}
3225
3226static void slab_mem_offline_callback(void *arg)
3227{
3228 struct kmem_cache_node *n;
3229 struct kmem_cache *s;
3230 struct memory_notify *marg = arg;
3231 int offline_node;
3232
3233 offline_node = marg->status_change_nid;
3234
3235 /*
3236 * If the node still has available memory, we still need its
3237 * kmem_cache_node, so there is nothing to do here.
3238 */
3239 if (offline_node < 0)
3240 return;
3241
3242 down_read(&slub_lock);
3243 list_for_each_entry(s, &slab_caches, list) {
3244 n = get_node(s, offline_node);
3245 if (n) {
3246 /*
3247 * if n->nr_slabs > 0, slabs still exist on the node
3248 * that is going down. We were unable to free them,
Adam Buchbinderc9404c92009-12-18 15:40:42 -05003249 * and the offline_pages() function shouldn't call this
Yasunori Gotob9049e22007-10-21 16:41:37 -07003250 * callback. So, we must fail.
3251 */
Christoph Lameter0f389ec2008-04-14 18:53:02 +03003252 BUG_ON(slabs_node(s, offline_node));
Yasunori Gotob9049e22007-10-21 16:41:37 -07003253
3254 s->node[offline_node] = NULL;
Christoph Lameter8de66a02010-08-25 14:51:14 -05003255 kmem_cache_free(kmem_cache_node, n);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003256 }
3257 }
3258 up_read(&slub_lock);
3259}
3260
3261static int slab_mem_going_online_callback(void *arg)
3262{
3263 struct kmem_cache_node *n;
3264 struct kmem_cache *s;
3265 struct memory_notify *marg = arg;
3266 int nid = marg->status_change_nid;
3267 int ret = 0;
3268
3269 /*
3270 * If the node's memory is already available, then kmem_cache_node is
3271 * already created. Nothing to do.
3272 */
3273 if (nid < 0)
3274 return 0;
3275
3276 /*
Christoph Lameter0121c6192008-04-29 16:11:12 -07003277 * We are bringing a node online. No memory is available yet. We must
Yasunori Gotob9049e22007-10-21 16:41:37 -07003278 * allocate a kmem_cache_node structure in order to bring the node
3279 * online.
3280 */
3281 down_read(&slub_lock);
3282 list_for_each_entry(s, &slab_caches, list) {
3283 /*
3284 * XXX: kmem_cache_alloc_node will fallback to other nodes
3285 * since memory is not yet available from the node that
3286 * is brought up.
3287 */
Christoph Lameter8de66a02010-08-25 14:51:14 -05003288 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003289 if (!n) {
3290 ret = -ENOMEM;
3291 goto out;
3292 }
Pekka Enberg5595cff2008-08-05 09:28:47 +03003293 init_kmem_cache_node(n, s);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003294 s->node[nid] = n;
3295 }
3296out:
3297 up_read(&slub_lock);
3298 return ret;
3299}
3300
3301static int slab_memory_callback(struct notifier_block *self,
3302 unsigned long action, void *arg)
3303{
3304 int ret = 0;
3305
3306 switch (action) {
3307 case MEM_GOING_ONLINE:
3308 ret = slab_mem_going_online_callback(arg);
3309 break;
3310 case MEM_GOING_OFFLINE:
3311 ret = slab_mem_going_offline_callback(arg);
3312 break;
3313 case MEM_OFFLINE:
3314 case MEM_CANCEL_ONLINE:
3315 slab_mem_offline_callback(arg);
3316 break;
3317 case MEM_ONLINE:
3318 case MEM_CANCEL_OFFLINE:
3319 break;
3320 }
KAMEZAWA Hiroyukidc19f9d2008-12-01 13:13:48 -08003321 if (ret)
3322 ret = notifier_from_errno(ret);
3323 else
3324 ret = NOTIFY_OK;
Yasunori Gotob9049e22007-10-21 16:41:37 -07003325 return ret;
3326}
3327
3328#endif /* CONFIG_MEMORY_HOTPLUG */
3329
Christoph Lameter81819f02007-05-06 14:49:36 -07003330/********************************************************************
3331 * Basic setup of slabs
3332 *******************************************************************/
3333
Christoph Lameter51df1142010-08-20 12:37:15 -05003334/*
3335 * Used for early kmem_cache structures that were allocated using
3336 * the page allocator
3337 */
3338
3339static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3340{
3341 int node;
3342
3343 list_add(&s->list, &slab_caches);
3344 s->refcount = -1;
3345
3346 for_each_node_state(node, N_NORMAL_MEMORY) {
3347 struct kmem_cache_node *n = get_node(s, node);
3348 struct page *p;
3349
3350 if (n) {
3351 list_for_each_entry(p, &n->partial, lru)
3352 p->slab = s;
3353
Li Zefan607bf322011-04-12 15:22:26 +08003354#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter51df1142010-08-20 12:37:15 -05003355 list_for_each_entry(p, &n->full, lru)
3356 p->slab = s;
3357#endif
3358 }
3359 }
3360}
3361
Christoph Lameter81819f02007-05-06 14:49:36 -07003362void __init kmem_cache_init(void)
3363{
3364 int i;
Christoph Lameter4b356be2007-06-16 10:16:13 -07003365 int caches = 0;
Christoph Lameter51df1142010-08-20 12:37:15 -05003366 struct kmem_cache *temp_kmem_cache;
3367 int order;
Christoph Lameter51df1142010-08-20 12:37:15 -05003368 struct kmem_cache *temp_kmem_cache_node;
3369 unsigned long kmalloc_size;
3370
3371 kmem_size = offsetof(struct kmem_cache, node) +
3372 nr_node_ids * sizeof(struct kmem_cache_node *);
3373
3374 /* Allocate two kmem_caches from the page allocator */
3375 kmalloc_size = ALIGN(kmem_size, cache_line_size());
3376 order = get_order(2 * kmalloc_size);
3377 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3378
Christoph Lameter81819f02007-05-06 14:49:36 -07003379 /*
3380 * Must first have the slab cache available for the allocations of the
Christoph Lameter672bba32007-05-09 02:32:39 -07003381 * struct kmem_cache_node's. There is special bootstrap code in
Christoph Lameter81819f02007-05-06 14:49:36 -07003382 * kmem_cache_open for slab_state == DOWN.
3383 */
Christoph Lameter51df1142010-08-20 12:37:15 -05003384 kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3385
3386 kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3387 sizeof(struct kmem_cache_node),
3388 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003389
Nadia Derbey0c40ba42008-04-29 01:00:41 -07003390 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
Christoph Lameter81819f02007-05-06 14:49:36 -07003391
3392 /* Able to allocate the per node structures */
3393 slab_state = PARTIAL;
3394
Christoph Lameter51df1142010-08-20 12:37:15 -05003395 temp_kmem_cache = kmem_cache;
3396 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3397 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3398 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3399 memcpy(kmem_cache, temp_kmem_cache, kmem_size);
Christoph Lameter81819f02007-05-06 14:49:36 -07003400
Christoph Lameter51df1142010-08-20 12:37:15 -05003401 /*
3402 * Allocate kmem_cache_node properly from the kmem_cache slab.
3403 * kmem_cache_node is separately allocated so no need to
3404 * update any list pointers.
3405 */
3406 temp_kmem_cache_node = kmem_cache_node;
Christoph Lameter81819f02007-05-06 14:49:36 -07003407
Christoph Lameter51df1142010-08-20 12:37:15 -05003408 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3409 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3410
3411 kmem_cache_bootstrap_fixup(kmem_cache_node);
3412
3413 caches++;
Christoph Lameter51df1142010-08-20 12:37:15 -05003414 kmem_cache_bootstrap_fixup(kmem_cache);
3415 caches++;
3416 /* Free temporary boot structure */
3417 free_pages((unsigned long)temp_kmem_cache, order);
3418
3419 /* Now we can use the kmem_cache to allocate kmalloc slabs */
Christoph Lameterf1b26332007-07-17 04:03:26 -07003420
3421 /*
3422 * Patch up the size_index table if we have strange large alignment
3423 * requirements for the kmalloc array. This is only the case for
Christoph Lameter6446faa2008-02-15 23:45:26 -08003424 * MIPS, it seems. The standard arches will not generate any code here.
Christoph Lameterf1b26332007-07-17 04:03:26 -07003425 *
3426 * Largest permitted alignment is 256 bytes due to the way we
3427 * handle the index determination for the smaller caches.
3428 *
3429 * Make sure that nothing crazy happens if someone starts tinkering
3430 * around with ARCH_KMALLOC_MINALIGN
3431 */
3432 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3433 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3434
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003435 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3436 int elem = size_index_elem(i);
3437 if (elem >= ARRAY_SIZE(size_index))
3438 break;
3439 size_index[elem] = KMALLOC_SHIFT_LOW;
3440 }
Christoph Lameterf1b26332007-07-17 04:03:26 -07003441
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003442 if (KMALLOC_MIN_SIZE == 64) {
3443 /*
3444 * The 96 byte cache is not used if the minimum alignment
3445 * is 64 bytes.
3446 */
3447 for (i = 64 + 8; i <= 96; i += 8)
3448 size_index[size_index_elem(i)] = 7;
3449 } else if (KMALLOC_MIN_SIZE == 128) {
Christoph Lameter41d54d32008-07-03 09:14:26 -05003450 /*
3451 * The 192 byte cache is not used if the minimum alignment
3452 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3453 * instead.
3454 */
3455 for (i = 128 + 8; i <= 192; i += 8)
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003456 size_index[size_index_elem(i)] = 8;
Christoph Lameter41d54d32008-07-03 09:14:26 -05003457 }
3458
Christoph Lameter51df1142010-08-20 12:37:15 -05003459 /* Caches that are not power-of-two sizes */
3460 if (KMALLOC_MIN_SIZE <= 32) {
3461 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3462 caches++;
3463 }
3464
3465 if (KMALLOC_MIN_SIZE <= 64) {
3466 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3467 caches++;
3468 }
3469
3470 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3471 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3472 caches++;
3473 }
3474
Christoph Lameter81819f02007-05-06 14:49:36 -07003475 slab_state = UP;
3476
3477 /* Provide the correct kmalloc names now that the caches are up */
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003478 if (KMALLOC_MIN_SIZE <= 32) {
3479 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3480 BUG_ON(!kmalloc_caches[1]->name);
3481 }
3482
3483 if (KMALLOC_MIN_SIZE <= 64) {
3484 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3485 BUG_ON(!kmalloc_caches[2]->name);
3486 }
3487
Christoph Lameterd7278bd2010-07-09 14:07:12 -05003488 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3489 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3490
3491 BUG_ON(!s);
Christoph Lameter51df1142010-08-20 12:37:15 -05003492 kmalloc_caches[i]->name = s;
Christoph Lameterd7278bd2010-07-09 14:07:12 -05003493 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003494
3495#ifdef CONFIG_SMP
3496 register_cpu_notifier(&slab_notifier);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003497#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003498
Christoph Lameter55136592010-08-20 12:37:13 -05003499#ifdef CONFIG_ZONE_DMA
Christoph Lameter51df1142010-08-20 12:37:15 -05003500 for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3501 struct kmem_cache *s = kmalloc_caches[i];
Christoph Lameter55136592010-08-20 12:37:13 -05003502
Christoph Lameter51df1142010-08-20 12:37:15 -05003503 if (s && s->size) {
Christoph Lameter55136592010-08-20 12:37:13 -05003504 char *name = kasprintf(GFP_NOWAIT,
3505 "dma-kmalloc-%d", s->objsize);
3506
3507 BUG_ON(!name);
Christoph Lameter51df1142010-08-20 12:37:15 -05003508 kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3509 s->objsize, SLAB_CACHE_DMA);
Christoph Lameter55136592010-08-20 12:37:13 -05003510 }
3511 }
3512#endif
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003513 printk(KERN_INFO
3514 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
Christoph Lameter4b356be2007-06-16 10:16:13 -07003515 " CPUs=%d, Nodes=%d\n",
3516 caches, cache_line_size(),
Christoph Lameter81819f02007-05-06 14:49:36 -07003517 slub_min_order, slub_max_order, slub_min_objects,
3518 nr_cpu_ids, nr_node_ids);
3519}
3520
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003521void __init kmem_cache_init_late(void)
3522{
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003523}
3524
Christoph Lameter81819f02007-05-06 14:49:36 -07003525/*
3526 * Find a mergeable slab cache
3527 */
3528static int slab_unmergeable(struct kmem_cache *s)
3529{
3530 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3531 return 1;
3532
Christoph Lameterc59def92007-05-16 22:10:50 -07003533 if (s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003534 return 1;
3535
Christoph Lameter8ffa6872007-05-31 00:40:51 -07003536 /*
3537 * We may have set a slab to be unmergeable during bootstrap.
3538 */
3539 if (s->refcount < 0)
3540 return 1;
3541
Christoph Lameter81819f02007-05-06 14:49:36 -07003542 return 0;
3543}
3544
3545static struct kmem_cache *find_mergeable(size_t size,
Christoph Lameterba0268a2007-09-11 15:24:11 -07003546 size_t align, unsigned long flags, const char *name,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003547 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003548{
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003549 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003550
3551 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3552 return NULL;
3553
Christoph Lameterc59def92007-05-16 22:10:50 -07003554 if (ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003555 return NULL;
3556
3557 size = ALIGN(size, sizeof(void *));
3558 align = calculate_alignment(flags, align, size);
3559 size = ALIGN(size, align);
Christoph Lameterba0268a2007-09-11 15:24:11 -07003560 flags = kmem_cache_flags(size, flags, name, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07003561
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003562 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003563 if (slab_unmergeable(s))
3564 continue;
3565
3566 if (size > s->size)
3567 continue;
3568
Christoph Lameterba0268a2007-09-11 15:24:11 -07003569 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
Christoph Lameter81819f02007-05-06 14:49:36 -07003570 continue;
3571 /*
3572 * Check if alignment is compatible.
3573 * Courtesy of Adrian Drzewiecki
3574 */
Pekka Enberg06428782008-01-07 23:20:27 -08003575 if ((s->size & ~(align - 1)) != s->size)
Christoph Lameter81819f02007-05-06 14:49:36 -07003576 continue;
3577
3578 if (s->size - size >= sizeof(void *))
3579 continue;
3580
3581 return s;
3582 }
3583 return NULL;
3584}
3585
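/*
 * Example of the merge test above (illustrative; assumes debugging is off
 * so the existing kmalloc-96 cache has s->size == 96 and slub_nomerge is
 * not set): a request for a 92 byte cache with no constructor and
 * compatible flags rounds up to 96 with 8 byte alignment, the size
 * difference is below sizeof(void *), so kmem_cache_create() below simply
 * bumps the refcount and registers the new name as an alias of kmalloc-96
 * instead of creating a separate cache.
 */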
3586struct kmem_cache *kmem_cache_create(const char *name, size_t size,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003587 size_t align, unsigned long flags, void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003588{
3589 struct kmem_cache *s;
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003590 char *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07003591
Benjamin Herrenschmidtfe1ff492009-09-21 17:02:30 -07003592 if (WARN_ON(!name))
3593 return NULL;
3594
Christoph Lameter81819f02007-05-06 14:49:36 -07003595 down_write(&slub_lock);
Christoph Lameterba0268a2007-09-11 15:24:11 -07003596 s = find_mergeable(size, align, flags, name, ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07003597 if (s) {
3598 s->refcount++;
3599 /*
3600 * Adjust the object sizes so that we clear
3601 * the complete object on kzalloc.
3602 */
3603 s->objsize = max(s->objsize, (int)size);
3604 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
Christoph Lameter6446faa2008-02-15 23:45:26 -08003605
David Rientjes7b8f3b62008-12-17 22:09:46 -08003606 if (sysfs_slab_alias(s, name)) {
David Rientjes7b8f3b62008-12-17 22:09:46 -08003607 s->refcount--;
Christoph Lameter81819f02007-05-06 14:49:36 -07003608 goto err;
David Rientjes7b8f3b62008-12-17 22:09:46 -08003609 }
Christoph Lameter2bce6482010-07-19 11:39:11 -05003610 up_write(&slub_lock);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003611 return s;
3612 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08003613
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003614 n = kstrdup(name, GFP_KERNEL);
3615 if (!n)
3616 goto err;
3617
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003618 s = kmalloc(kmem_size, GFP_KERNEL);
3619 if (s) {
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003620 if (kmem_cache_open(s, n,
Christoph Lameterc59def92007-05-16 22:10:50 -07003621 size, align, flags, ctor)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003622 list_add(&s->list, &slab_caches);
David Rientjes7b8f3b62008-12-17 22:09:46 -08003623 if (sysfs_slab_add(s)) {
David Rientjes7b8f3b62008-12-17 22:09:46 -08003624 list_del(&s->list);
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003625 kfree(n);
David Rientjes7b8f3b62008-12-17 22:09:46 -08003626 kfree(s);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003627 goto err;
David Rientjes7b8f3b62008-12-17 22:09:46 -08003628 }
Christoph Lameter2bce6482010-07-19 11:39:11 -05003629 up_write(&slub_lock);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003630 return s;
3631 }
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003632 kfree(n);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003633 kfree(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003634 }
Pavel Emelyanov68cee4f12010-10-28 13:50:37 +04003635err:
Christoph Lameter81819f02007-05-06 14:49:36 -07003636 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07003637
Christoph Lameter81819f02007-05-06 14:49:36 -07003638 if (flags & SLAB_PANIC)
3639 panic("Cannot create slabcache %s\n", name);
3640 else
3641 s = NULL;
3642 return s;
3643}
3644EXPORT_SYMBOL(kmem_cache_create);
3645
Christoph Lameter81819f02007-05-06 14:49:36 -07003646#ifdef CONFIG_SMP
Christoph Lameter27390bc2007-06-01 00:47:09 -07003647/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003648 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3649 * necessary.
Christoph Lameter81819f02007-05-06 14:49:36 -07003650 */
3651static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3652 unsigned long action, void *hcpu)
3653{
3654 long cpu = (long)hcpu;
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003655 struct kmem_cache *s;
3656 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07003657
3658 switch (action) {
3659 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003660 case CPU_UP_CANCELED_FROZEN:
Christoph Lameter81819f02007-05-06 14:49:36 -07003661 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003662 case CPU_DEAD_FROZEN:
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003663 down_read(&slub_lock);
3664 list_for_each_entry(s, &slab_caches, list) {
3665 local_irq_save(flags);
3666 __flush_cpu_slab(s, cpu);
3667 local_irq_restore(flags);
3668 }
3669 up_read(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07003670 break;
3671 default:
3672 break;
3673 }
3674 return NOTIFY_OK;
3675}
3676
Pekka Enberg06428782008-01-07 23:20:27 -08003677static struct notifier_block __cpuinitdata slab_notifier = {
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003678 .notifier_call = slab_cpuup_callback
Pekka Enberg06428782008-01-07 23:20:27 -08003679};
Christoph Lameter81819f02007-05-06 14:49:36 -07003680
3681#endif
3682
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003683void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07003684{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003685 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003686 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003687
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003688 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02003689 return kmalloc_large(size, gfpflags);
3690
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003691 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07003692
Satyam Sharma2408c552007-10-16 01:24:44 -07003693 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003694 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003695
Christoph Lameter2154a332010-07-09 14:07:10 -05003696 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003697
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003698 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003699 trace_kmalloc(caller, ret, size, s->size, gfpflags);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003700
3701 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003702}
3703
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09003704#ifdef CONFIG_NUMA
Christoph Lameter81819f02007-05-06 14:49:36 -07003705void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003706 int node, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07003707{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003708 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003709 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003710
Xiaotian Fengd3e14aa2010-04-08 17:26:44 +08003711 if (unlikely(size > SLUB_MAX_SIZE)) {
3712 ret = kmalloc_large_node(size, gfpflags, node);
3713
3714 trace_kmalloc_node(caller, ret,
3715 size, PAGE_SIZE << get_order(size),
3716 gfpflags, node);
3717
3718 return ret;
3719 }
Pekka Enbergeada35e2008-02-11 22:47:46 +02003720
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003721 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07003722
Satyam Sharma2408c552007-10-16 01:24:44 -07003723 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003724 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003725
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003726 ret = slab_alloc(s, gfpflags, node, caller);
3727
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003728 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003729 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003730
3731 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003732}
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09003733#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003734
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05003735#ifdef CONFIG_SYSFS
Christoph Lameter205ab992008-04-14 19:11:40 +03003736static int count_inuse(struct page *page)
3737{
3738 return page->inuse;
3739}
3740
3741static int count_total(struct page *page)
3742{
3743 return page->objects;
3744}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05003745#endif
Christoph Lameter205ab992008-04-14 19:11:40 +03003746
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05003747#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter434e2452007-07-17 04:03:30 -07003748static int validate_slab(struct kmem_cache *s, struct page *page,
3749 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003750{
3751 void *p;
Christoph Lametera973e9d2008-03-01 13:40:44 -08003752 void *addr = page_address(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003753
3754 if (!check_slab(s, page) ||
3755 !on_freelist(s, page, NULL))
3756 return 0;
3757
3758 /* Now we know that a valid freelist exists */
Christoph Lameter39b26462008-04-14 19:11:30 +03003759 bitmap_zero(map, page->objects);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003760
Christoph Lameter5f80b132011-04-15 14:48:13 -05003761 get_map(s, page, map);
3762 for_each_object(p, s, addr, page->objects) {
3763 if (test_bit(slab_index(p, s, addr), map))
3764 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3765 return 0;
Christoph Lameter53e15af2007-05-06 14:49:43 -07003766 }
3767
Christoph Lameter224a88b2008-04-14 19:11:31 +03003768 for_each_object(p, s, addr, page->objects)
Christoph Lameter7656c722007-05-09 02:32:40 -07003769 if (!test_bit(slab_index(p, s, addr), map))
Tero Roponen37d57442010-12-01 20:04:20 +02003770 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
Christoph Lameter53e15af2007-05-06 14:49:43 -07003771 return 0;
3772 return 1;
3773}
3774
Christoph Lameter434e2452007-07-17 04:03:30 -07003775static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3776 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003777{
Christoph Lameter881db7f2011-06-01 12:25:53 -05003778 slab_lock(page);
3779 validate_slab(s, page, map);
3780 slab_unlock(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003781}
3782
Christoph Lameter434e2452007-07-17 04:03:30 -07003783static int validate_slab_node(struct kmem_cache *s,
3784 struct kmem_cache_node *n, unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003785{
3786 unsigned long count = 0;
3787 struct page *page;
3788 unsigned long flags;
3789
3790 spin_lock_irqsave(&n->list_lock, flags);
3791
3792 list_for_each_entry(page, &n->partial, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07003793 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003794 count++;
3795 }
3796 if (count != n->nr_partial)
3797 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3798 "counter=%ld\n", s->name, count, n->nr_partial);
3799
3800 if (!(s->flags & SLAB_STORE_USER))
3801 goto out;
3802
3803 list_for_each_entry(page, &n->full, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07003804 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003805 count++;
3806 }
3807 if (count != atomic_long_read(&n->nr_slabs))
3808 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3809 "counter=%ld\n", s->name, count,
3810 atomic_long_read(&n->nr_slabs));
3811
3812out:
3813 spin_unlock_irqrestore(&n->list_lock, flags);
3814 return count;
3815}
3816
Christoph Lameter434e2452007-07-17 04:03:30 -07003817static long validate_slab_cache(struct kmem_cache *s)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003818{
3819 int node;
3820 unsigned long count = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03003821 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
Christoph Lameter434e2452007-07-17 04:03:30 -07003822 sizeof(unsigned long), GFP_KERNEL);
3823
3824 if (!map)
3825 return -ENOMEM;
Christoph Lameter53e15af2007-05-06 14:49:43 -07003826
3827 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07003828 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter53e15af2007-05-06 14:49:43 -07003829 struct kmem_cache_node *n = get_node(s, node);
3830
Christoph Lameter434e2452007-07-17 04:03:30 -07003831 count += validate_slab_node(s, n, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003832 }
Christoph Lameter434e2452007-07-17 04:03:30 -07003833 kfree(map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003834 return count;
3835}
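/*
 * validate_slab_cache() is normally invoked from user space through the
 * "validate" sysfs attribute defined further below, e.g.:
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * Inconsistencies are reported through the slab debug printks; the return
 * value is the number of slabs checked, or -ENOMEM if the scratch bitmap
 * could not be allocated.
 */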
Christoph Lameter88a420e2007-05-06 14:49:45 -07003836/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003837 * Generate lists of code addresses where slabcache objects are allocated
Christoph Lameter88a420e2007-05-06 14:49:45 -07003838 * and freed.
3839 */
3840
3841struct location {
3842 unsigned long count;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003843 unsigned long addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003844 long long sum_time;
3845 long min_time;
3846 long max_time;
3847 long min_pid;
3848 long max_pid;
Rusty Russell174596a2009-01-01 10:12:29 +10303849 DECLARE_BITMAP(cpus, NR_CPUS);
Christoph Lameter45edfa52007-05-09 02:32:45 -07003850 nodemask_t nodes;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003851};
3852
3853struct loc_track {
3854 unsigned long max;
3855 unsigned long count;
3856 struct location *loc;
3857};
3858
3859static void free_loc_track(struct loc_track *t)
3860{
3861 if (t->max)
3862 free_pages((unsigned long)t->loc,
3863 get_order(sizeof(struct location) * t->max));
3864}
3865
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003866static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003867{
3868 struct location *l;
3869 int order;
3870
Christoph Lameter88a420e2007-05-06 14:49:45 -07003871 order = get_order(sizeof(struct location) * max);
3872
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003873 l = (void *)__get_free_pages(flags, order);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003874 if (!l)
3875 return 0;
3876
3877 if (t->count) {
3878 memcpy(l, t->loc, sizeof(struct location) * t->count);
3879 free_loc_track(t);
3880 }
3881 t->max = max;
3882 t->loc = l;
3883 return 1;
3884}
3885
3886static int add_location(struct loc_track *t, struct kmem_cache *s,
Christoph Lameter45edfa52007-05-09 02:32:45 -07003887 const struct track *track)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003888{
3889 long start, end, pos;
3890 struct location *l;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003891 unsigned long caddr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003892 unsigned long age = jiffies - track->when;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003893
3894 start = -1;
3895 end = t->count;
3896
3897 for ( ; ; ) {
3898 pos = start + (end - start + 1) / 2;
3899
3900 /*
3901 * There is nothing at "end". If we end up there
                                              3902		 * we need to insert the new element before "end".
3903 */
3904 if (pos == end)
3905 break;
3906
3907 caddr = t->loc[pos].addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003908 if (track->addr == caddr) {
3909
3910 l = &t->loc[pos];
3911 l->count++;
3912 if (track->when) {
3913 l->sum_time += age;
3914 if (age < l->min_time)
3915 l->min_time = age;
3916 if (age > l->max_time)
3917 l->max_time = age;
3918
3919 if (track->pid < l->min_pid)
3920 l->min_pid = track->pid;
3921 if (track->pid > l->max_pid)
3922 l->max_pid = track->pid;
3923
Rusty Russell174596a2009-01-01 10:12:29 +10303924 cpumask_set_cpu(track->cpu,
3925 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07003926 }
3927 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003928 return 1;
3929 }
3930
Christoph Lameter45edfa52007-05-09 02:32:45 -07003931 if (track->addr < caddr)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003932 end = pos;
3933 else
3934 start = pos;
3935 }
3936
3937 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003938 * Not found. Insert new tracking element.
Christoph Lameter88a420e2007-05-06 14:49:45 -07003939 */
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003940 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
Christoph Lameter88a420e2007-05-06 14:49:45 -07003941 return 0;
3942
3943 l = t->loc + pos;
3944 if (pos < t->count)
3945 memmove(l + 1, l,
3946 (t->count - pos) * sizeof(struct location));
3947 t->count++;
3948 l->count = 1;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003949 l->addr = track->addr;
3950 l->sum_time = age;
3951 l->min_time = age;
3952 l->max_time = age;
3953 l->min_pid = track->pid;
3954 l->max_pid = track->pid;
Rusty Russell174596a2009-01-01 10:12:29 +10303955 cpumask_clear(to_cpumask(l->cpus));
3956 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07003957 nodes_clear(l->nodes);
3958 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003959 return 1;
3960}
3961
3962static void process_slab(struct loc_track *t, struct kmem_cache *s,
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003963 struct page *page, enum track_item alloc,
Namhyung Kima5dd5c12010-09-29 21:02:13 +09003964 unsigned long *map)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003965{
Christoph Lametera973e9d2008-03-01 13:40:44 -08003966 void *addr = page_address(page);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003967 void *p;
3968
Christoph Lameter39b26462008-04-14 19:11:30 +03003969 bitmap_zero(map, page->objects);
Christoph Lameter5f80b132011-04-15 14:48:13 -05003970 get_map(s, page, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003971
Christoph Lameter224a88b2008-04-14 19:11:31 +03003972 for_each_object(p, s, addr, page->objects)
Christoph Lameter45edfa52007-05-09 02:32:45 -07003973 if (!test_bit(slab_index(p, s, addr), map))
3974 add_location(t, s, get_track(s, p, alloc));
Christoph Lameter88a420e2007-05-06 14:49:45 -07003975}
3976
3977static int list_locations(struct kmem_cache *s, char *buf,
3978 enum track_item alloc)
3979{
Harvey Harrisone374d482008-01-31 15:20:50 -08003980 int len = 0;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003981 unsigned long i;
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003982 struct loc_track t = { 0, 0, NULL };
Christoph Lameter88a420e2007-05-06 14:49:45 -07003983 int node;
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003984 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3985 sizeof(unsigned long), GFP_KERNEL);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003986
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003987 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3988 GFP_TEMPORARY)) {
3989 kfree(map);
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003990 return sprintf(buf, "Out of memory\n");
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003991 }
Christoph Lameter88a420e2007-05-06 14:49:45 -07003992 /* Push back cpu slabs */
3993 flush_all(s);
3994
Christoph Lameterf64dc582007-10-16 01:25:33 -07003995 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter88a420e2007-05-06 14:49:45 -07003996 struct kmem_cache_node *n = get_node(s, node);
3997 unsigned long flags;
3998 struct page *page;
3999
Christoph Lameter9e869432007-08-22 14:01:56 -07004000 if (!atomic_long_read(&n->nr_slabs))
Christoph Lameter88a420e2007-05-06 14:49:45 -07004001 continue;
4002
4003 spin_lock_irqsave(&n->list_lock, flags);
4004 list_for_each_entry(page, &n->partial, lru)
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004005 process_slab(&t, s, page, alloc, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004006 list_for_each_entry(page, &n->full, lru)
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004007 process_slab(&t, s, page, alloc, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004008 spin_unlock_irqrestore(&n->list_lock, flags);
4009 }
4010
4011 for (i = 0; i < t.count; i++) {
Christoph Lameter45edfa52007-05-09 02:32:45 -07004012 struct location *l = &t.loc[i];
Christoph Lameter88a420e2007-05-06 14:49:45 -07004013
Hugh Dickins9c246242008-12-09 13:14:27 -08004014 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004015 break;
Harvey Harrisone374d482008-01-31 15:20:50 -08004016 len += sprintf(buf + len, "%7ld ", l->count);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004017
4018 if (l->addr)
Joe Perches62c70bc2011-01-13 15:45:52 -08004019 len += sprintf(buf + len, "%pS", (void *)l->addr);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004020 else
Harvey Harrisone374d482008-01-31 15:20:50 -08004021 len += sprintf(buf + len, "<not-available>");
Christoph Lameter45edfa52007-05-09 02:32:45 -07004022
4023 if (l->sum_time != l->min_time) {
Harvey Harrisone374d482008-01-31 15:20:50 -08004024 len += sprintf(buf + len, " age=%ld/%ld/%ld",
Roman Zippelf8bd2252008-05-01 04:34:31 -07004025 l->min_time,
4026 (long)div_u64(l->sum_time, l->count),
4027 l->max_time);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004028 } else
Harvey Harrisone374d482008-01-31 15:20:50 -08004029 len += sprintf(buf + len, " age=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004030 l->min_time);
4031
4032 if (l->min_pid != l->max_pid)
Harvey Harrisone374d482008-01-31 15:20:50 -08004033 len += sprintf(buf + len, " pid=%ld-%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004034 l->min_pid, l->max_pid);
4035 else
Harvey Harrisone374d482008-01-31 15:20:50 -08004036 len += sprintf(buf + len, " pid=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004037 l->min_pid);
4038
Rusty Russell174596a2009-01-01 10:12:29 +10304039 if (num_online_cpus() > 1 &&
4040 !cpumask_empty(to_cpumask(l->cpus)) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08004041 len < PAGE_SIZE - 60) {
4042 len += sprintf(buf + len, " cpus=");
4043 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Rusty Russell174596a2009-01-01 10:12:29 +10304044 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004045 }
4046
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004047 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08004048 len < PAGE_SIZE - 60) {
4049 len += sprintf(buf + len, " nodes=");
4050 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Christoph Lameter45edfa52007-05-09 02:32:45 -07004051 l->nodes);
4052 }
4053
Harvey Harrisone374d482008-01-31 15:20:50 -08004054 len += sprintf(buf + len, "\n");
Christoph Lameter88a420e2007-05-06 14:49:45 -07004055 }
4056
4057 free_loc_track(&t);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004058 kfree(map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004059 if (!t.count)
Harvey Harrisone374d482008-01-31 15:20:50 -08004060 len += sprintf(buf, "No data\n");
4061 return len;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004062}
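/*
 * Each line produced above follows the sprintf() calls in order, roughly:
 *
 *	<count> <symbol>+<offset>/<size> age=<min>[/<avg>/<max>]
 *		pid=<pid>[-<pid>] [cpus=<mask>] [nodes=<mask>]
 *
 * A hypothetical entry from /sys/kernel/slab/dentry/alloc_calls:
 *
 *	 1234 __d_alloc+0x2a/0x1a0 age=3/1507/4526 pid=1-2317 cpus=0-3
 *
 * The cpus= and nodes= parts only appear on multi cpu / multi node systems
 * with a non-empty mask.
 */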
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004063#endif
Christoph Lameter88a420e2007-05-06 14:49:45 -07004064
Christoph Lametera5a84752010-10-05 13:57:27 -05004065#ifdef SLUB_RESILIENCY_TEST
4066static void resiliency_test(void)
4067{
4068 u8 *p;
4069
4070 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4071
4072 printk(KERN_ERR "SLUB resiliency testing\n");
4073 printk(KERN_ERR "-----------------------\n");
4074 printk(KERN_ERR "A. Corruption after allocation\n");
4075
4076 p = kzalloc(16, GFP_KERNEL);
4077 p[16] = 0x12;
4078 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4079 " 0x12->0x%p\n\n", p + 16);
4080
4081 validate_slab_cache(kmalloc_caches[4]);
4082
4083 /* Hmmm... The next two are dangerous */
4084 p = kzalloc(32, GFP_KERNEL);
4085 p[32 + sizeof(void *)] = 0x34;
4086 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
                                              4087			" 0x34 -> 0x%p\n", p);
4088 printk(KERN_ERR
4089 "If allocated object is overwritten then not detectable\n\n");
4090
4091 validate_slab_cache(kmalloc_caches[5]);
4092 p = kzalloc(64, GFP_KERNEL);
4093 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4094 *p = 0x56;
4095 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4096 p);
4097 printk(KERN_ERR
4098 "If allocated object is overwritten then not detectable\n\n");
4099 validate_slab_cache(kmalloc_caches[6]);
4100
4101 printk(KERN_ERR "\nB. Corruption after free\n");
4102 p = kzalloc(128, GFP_KERNEL);
4103 kfree(p);
4104 *p = 0x78;
4105 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4106 validate_slab_cache(kmalloc_caches[7]);
4107
4108 p = kzalloc(256, GFP_KERNEL);
4109 kfree(p);
4110 p[50] = 0x9a;
4111 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4112 p);
4113 validate_slab_cache(kmalloc_caches[8]);
4114
4115 p = kzalloc(512, GFP_KERNEL);
4116 kfree(p);
4117 p[512] = 0xab;
4118 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4119 validate_slab_cache(kmalloc_caches[9]);
4120}
4121#else
4122#ifdef CONFIG_SYSFS
4123static void resiliency_test(void) {};
4124#endif
4125#endif
4126
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004127#ifdef CONFIG_SYSFS
Christoph Lameter81819f02007-05-06 14:49:36 -07004128enum slab_stat_type {
Christoph Lameter205ab992008-04-14 19:11:40 +03004129 SL_ALL, /* All slabs */
4130 SL_PARTIAL, /* Only partially allocated slabs */
4131 SL_CPU, /* Only slabs used for cpu caches */
4132 SL_OBJECTS, /* Determine allocated objects not slabs */
4133 SL_TOTAL /* Determine object capacity not slabs */
Christoph Lameter81819f02007-05-06 14:49:36 -07004134};
4135
Christoph Lameter205ab992008-04-14 19:11:40 +03004136#define SO_ALL (1 << SL_ALL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004137#define SO_PARTIAL (1 << SL_PARTIAL)
4138#define SO_CPU (1 << SL_CPU)
4139#define SO_OBJECTS (1 << SL_OBJECTS)
Christoph Lameter205ab992008-04-14 19:11:40 +03004140#define SO_TOTAL (1 << SL_TOTAL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004141
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004142static ssize_t show_slab_objects(struct kmem_cache *s,
4143 char *buf, unsigned long flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07004144{
4145 unsigned long total = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07004146 int node;
4147 int x;
4148 unsigned long *nodes;
4149 unsigned long *per_cpu;
4150
4151 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004152 if (!nodes)
4153 return -ENOMEM;
Christoph Lameter81819f02007-05-06 14:49:36 -07004154 per_cpu = nodes + nr_node_ids;
4155
Christoph Lameter205ab992008-04-14 19:11:40 +03004156 if (flags & SO_CPU) {
4157 int cpu;
Christoph Lameter81819f02007-05-06 14:49:36 -07004158
Christoph Lameter205ab992008-04-14 19:11:40 +03004159 for_each_possible_cpu(cpu) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06004160 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004161
Christoph Lameter205ab992008-04-14 19:11:40 +03004162 if (!c || c->node < 0)
4163 continue;
4164
4165 if (c->page) {
4166 if (flags & SO_TOTAL)
4167 x = c->page->objects;
4168 else if (flags & SO_OBJECTS)
4169 x = c->page->inuse;
Christoph Lameter81819f02007-05-06 14:49:36 -07004170 else
4171 x = 1;
Christoph Lameter205ab992008-04-14 19:11:40 +03004172
Christoph Lameter81819f02007-05-06 14:49:36 -07004173 total += x;
Christoph Lameter205ab992008-04-14 19:11:40 +03004174 nodes[c->node] += x;
Christoph Lameter81819f02007-05-06 14:49:36 -07004175 }
Christoph Lameter205ab992008-04-14 19:11:40 +03004176 per_cpu[c->node]++;
Christoph Lameter81819f02007-05-06 14:49:36 -07004177 }
4178 }
4179
Christoph Lameter04d94872011-01-10 10:15:15 -06004180 lock_memory_hotplug();
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004181#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter205ab992008-04-14 19:11:40 +03004182 if (flags & SO_ALL) {
4183 for_each_node_state(node, N_NORMAL_MEMORY) {
4184 struct kmem_cache_node *n = get_node(s, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07004185
Christoph Lameter205ab992008-04-14 19:11:40 +03004186 if (flags & SO_TOTAL)
4187 x = atomic_long_read(&n->total_objects);
4188 else if (flags & SO_OBJECTS)
4189 x = atomic_long_read(&n->total_objects) -
4190 count_partial(n, count_free);
4191
4192 else
4193 x = atomic_long_read(&n->nr_slabs);
4194 total += x;
4195 nodes[node] += x;
4196 }
4197
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004198 } else
4199#endif
4200 if (flags & SO_PARTIAL) {
Christoph Lameter205ab992008-04-14 19:11:40 +03004201 for_each_node_state(node, N_NORMAL_MEMORY) {
4202 struct kmem_cache_node *n = get_node(s, node);
4203
4204 if (flags & SO_TOTAL)
4205 x = count_partial(n, count_total);
4206 else if (flags & SO_OBJECTS)
4207 x = count_partial(n, count_inuse);
Christoph Lameter81819f02007-05-06 14:49:36 -07004208 else
4209 x = n->nr_partial;
4210 total += x;
4211 nodes[node] += x;
4212 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004213 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004214 x = sprintf(buf, "%lu", total);
4215#ifdef CONFIG_NUMA
Christoph Lameterf64dc582007-10-16 01:25:33 -07004216 for_each_node_state(node, N_NORMAL_MEMORY)
Christoph Lameter81819f02007-05-06 14:49:36 -07004217 if (nodes[node])
4218 x += sprintf(buf + x, " N%d=%lu",
4219 node, nodes[node]);
4220#endif
Christoph Lameter04d94872011-01-10 10:15:15 -06004221 unlock_memory_hotplug();
Christoph Lameter81819f02007-05-06 14:49:36 -07004222 kfree(nodes);
4223 return x + sprintf(buf + x, "\n");
4224}
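/*
 * The buffer assembled above is a total followed by optional per node
 * counts. A hypothetical two node system might report for
 * /sys/kernel/slab/kmalloc-256/objects:
 *
 *	4128 N0=2112 N1=2016
 */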
4225
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004226#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07004227static int any_slab_objects(struct kmem_cache *s)
4228{
4229 int node;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004230
4231 for_each_online_node(node) {
Christoph Lameter81819f02007-05-06 14:49:36 -07004232 struct kmem_cache_node *n = get_node(s, node);
4233
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004234 if (!n)
4235 continue;
4236
Benjamin Herrenschmidt4ea33e22008-05-06 20:42:39 -07004237 if (atomic_long_read(&n->total_objects))
Christoph Lameter81819f02007-05-06 14:49:36 -07004238 return 1;
4239 }
4240 return 0;
4241}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004242#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004243
4244#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
                                              4245#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4246
4247struct slab_attribute {
4248 struct attribute attr;
4249 ssize_t (*show)(struct kmem_cache *s, char *buf);
4250 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4251};
4252
4253#define SLAB_ATTR_RO(_name) \
4254 static struct slab_attribute _name##_attr = __ATTR_RO(_name)
4255
4256#define SLAB_ATTR(_name) \
4257 static struct slab_attribute _name##_attr = \
4258 __ATTR(_name, 0644, _name##_show, _name##_store)
4259
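/*
 * For reference, SLAB_ATTR(order) below expands to roughly:
 *
 *	static struct slab_attribute order_attr =
 *		__ATTR(order, 0644, order_show, order_store);
 *
 * i.e. a read-write sysfs file named "order" wired to order_show() and
 * order_store(), while SLAB_ATTR_RO() produces a read-only attribute with
 * only a show routine.
 */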
Christoph Lameter81819f02007-05-06 14:49:36 -07004260static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4261{
4262 return sprintf(buf, "%d\n", s->size);
4263}
4264SLAB_ATTR_RO(slab_size);
4265
4266static ssize_t align_show(struct kmem_cache *s, char *buf)
4267{
4268 return sprintf(buf, "%d\n", s->align);
4269}
4270SLAB_ATTR_RO(align);
4271
4272static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4273{
4274 return sprintf(buf, "%d\n", s->objsize);
4275}
4276SLAB_ATTR_RO(object_size);
4277
4278static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4279{
Christoph Lameter834f3d12008-04-14 19:11:31 +03004280 return sprintf(buf, "%d\n", oo_objects(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07004281}
4282SLAB_ATTR_RO(objs_per_slab);
4283
Christoph Lameter06b285d2008-04-14 19:11:41 +03004284static ssize_t order_store(struct kmem_cache *s,
4285 const char *buf, size_t length)
4286{
Christoph Lameter0121c6192008-04-29 16:11:12 -07004287 unsigned long order;
4288 int err;
4289
4290 err = strict_strtoul(buf, 10, &order);
4291 if (err)
4292 return err;
Christoph Lameter06b285d2008-04-14 19:11:41 +03004293
4294 if (order > slub_max_order || order < slub_min_order)
4295 return -EINVAL;
4296
4297 calculate_sizes(s, order);
4298 return length;
4299}
4300
Christoph Lameter81819f02007-05-06 14:49:36 -07004301static ssize_t order_show(struct kmem_cache *s, char *buf)
4302{
Christoph Lameter834f3d12008-04-14 19:11:31 +03004303 return sprintf(buf, "%d\n", oo_order(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07004304}
Christoph Lameter06b285d2008-04-14 19:11:41 +03004305SLAB_ATTR(order);
Christoph Lameter81819f02007-05-06 14:49:36 -07004306
David Rientjes73d342b2009-02-22 17:40:09 -08004307static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4308{
4309 return sprintf(buf, "%lu\n", s->min_partial);
4310}
4311
4312static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4313 size_t length)
4314{
4315 unsigned long min;
4316 int err;
4317
4318 err = strict_strtoul(buf, 10, &min);
4319 if (err)
4320 return err;
4321
David Rientjesc0bdb232009-02-25 09:16:35 +02004322 set_min_partial(s, min);
David Rientjes73d342b2009-02-22 17:40:09 -08004323 return length;
4324}
4325SLAB_ATTR(min_partial);
4326
Christoph Lameter81819f02007-05-06 14:49:36 -07004327static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4328{
Joe Perches62c70bc2011-01-13 15:45:52 -08004329 if (!s->ctor)
4330 return 0;
4331 return sprintf(buf, "%pS\n", s->ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07004332}
4333SLAB_ATTR_RO(ctor);
4334
Christoph Lameter81819f02007-05-06 14:49:36 -07004335static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4336{
4337 return sprintf(buf, "%d\n", s->refcount - 1);
4338}
4339SLAB_ATTR_RO(aliases);
4340
Christoph Lameter81819f02007-05-06 14:49:36 -07004341static ssize_t partial_show(struct kmem_cache *s, char *buf)
4342{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08004343 return show_slab_objects(s, buf, SO_PARTIAL);
Christoph Lameter81819f02007-05-06 14:49:36 -07004344}
4345SLAB_ATTR_RO(partial);
4346
4347static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4348{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08004349 return show_slab_objects(s, buf, SO_CPU);
Christoph Lameter81819f02007-05-06 14:49:36 -07004350}
4351SLAB_ATTR_RO(cpu_slabs);
4352
4353static ssize_t objects_show(struct kmem_cache *s, char *buf)
4354{
Christoph Lameter205ab992008-04-14 19:11:40 +03004355 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
Christoph Lameter81819f02007-05-06 14:49:36 -07004356}
4357SLAB_ATTR_RO(objects);
4358
Christoph Lameter205ab992008-04-14 19:11:40 +03004359static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4360{
4361 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4362}
4363SLAB_ATTR_RO(objects_partial);
4364
Christoph Lameter81819f02007-05-06 14:49:36 -07004365static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4366{
4367 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4368}
4369
4370static ssize_t reclaim_account_store(struct kmem_cache *s,
4371 const char *buf, size_t length)
4372{
4373 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4374 if (buf[0] == '1')
4375 s->flags |= SLAB_RECLAIM_ACCOUNT;
4376 return length;
4377}
4378SLAB_ATTR(reclaim_account);
4379
4380static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4381{
Christoph Lameter5af60832007-05-06 14:49:56 -07004382 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
Christoph Lameter81819f02007-05-06 14:49:36 -07004383}
4384SLAB_ATTR_RO(hwcache_align);
4385
4386#ifdef CONFIG_ZONE_DMA
4387static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4388{
4389 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4390}
4391SLAB_ATTR_RO(cache_dma);
4392#endif
4393
4394static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4395{
4396 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4397}
4398SLAB_ATTR_RO(destroy_by_rcu);
4399
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08004400static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4401{
4402 return sprintf(buf, "%d\n", s->reserved);
4403}
4404SLAB_ATTR_RO(reserved);
4405
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004406#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05004407static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4408{
4409 return show_slab_objects(s, buf, SO_ALL);
4410}
4411SLAB_ATTR_RO(slabs);
4412
4413static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4414{
4415 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4416}
4417SLAB_ATTR_RO(total_objects);
4418
4419static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4420{
4421 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4422}
4423
4424static ssize_t sanity_checks_store(struct kmem_cache *s,
4425 const char *buf, size_t length)
4426{
4427 s->flags &= ~SLAB_DEBUG_FREE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004428 if (buf[0] == '1') {
4429 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lametera5a84752010-10-05 13:57:27 -05004430 s->flags |= SLAB_DEBUG_FREE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004431 }
Christoph Lametera5a84752010-10-05 13:57:27 -05004432 return length;
4433}
4434SLAB_ATTR(sanity_checks);
4435
4436static ssize_t trace_show(struct kmem_cache *s, char *buf)
4437{
4438 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4439}
4440
4441static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4442 size_t length)
4443{
4444 s->flags &= ~SLAB_TRACE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004445 if (buf[0] == '1') {
4446 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lametera5a84752010-10-05 13:57:27 -05004447 s->flags |= SLAB_TRACE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004448 }
Christoph Lametera5a84752010-10-05 13:57:27 -05004449 return length;
4450}
4451SLAB_ATTR(trace);
4452
Christoph Lameter81819f02007-05-06 14:49:36 -07004453static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4454{
4455 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4456}
4457
4458static ssize_t red_zone_store(struct kmem_cache *s,
4459 const char *buf, size_t length)
4460{
4461 if (any_slab_objects(s))
4462 return -EBUSY;
4463
4464 s->flags &= ~SLAB_RED_ZONE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004465 if (buf[0] == '1') {
4466 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004467 s->flags |= SLAB_RED_ZONE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004468 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004469 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004470 return length;
4471}
4472SLAB_ATTR(red_zone);
4473
4474static ssize_t poison_show(struct kmem_cache *s, char *buf)
4475{
4476 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4477}
4478
4479static ssize_t poison_store(struct kmem_cache *s,
4480 const char *buf, size_t length)
4481{
4482 if (any_slab_objects(s))
4483 return -EBUSY;
4484
4485 s->flags &= ~SLAB_POISON;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004486 if (buf[0] == '1') {
4487 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004488 s->flags |= SLAB_POISON;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004489 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004490 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004491 return length;
4492}
4493SLAB_ATTR(poison);
4494
4495static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4496{
4497 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4498}
4499
4500static ssize_t store_user_store(struct kmem_cache *s,
4501 const char *buf, size_t length)
4502{
4503 if (any_slab_objects(s))
4504 return -EBUSY;
4505
4506 s->flags &= ~SLAB_STORE_USER;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004507 if (buf[0] == '1') {
4508 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004509 s->flags |= SLAB_STORE_USER;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004510 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004511 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004512 return length;
4513}
4514SLAB_ATTR(store_user);
4515
Christoph Lameter53e15af2007-05-06 14:49:43 -07004516static ssize_t validate_show(struct kmem_cache *s, char *buf)
4517{
4518 return 0;
4519}
4520
4521static ssize_t validate_store(struct kmem_cache *s,
4522 const char *buf, size_t length)
4523{
Christoph Lameter434e2452007-07-17 04:03:30 -07004524 int ret = -EINVAL;
4525
4526 if (buf[0] == '1') {
4527 ret = validate_slab_cache(s);
4528 if (ret >= 0)
4529 ret = length;
4530 }
4531 return ret;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004532}
4533SLAB_ATTR(validate);
Christoph Lametera5a84752010-10-05 13:57:27 -05004534
4535static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4536{
4537 if (!(s->flags & SLAB_STORE_USER))
4538 return -ENOSYS;
4539 return list_locations(s, buf, TRACK_ALLOC);
4540}
4541SLAB_ATTR_RO(alloc_calls);
4542
4543static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4544{
4545 if (!(s->flags & SLAB_STORE_USER))
4546 return -ENOSYS;
4547 return list_locations(s, buf, TRACK_FREE);
4548}
4549SLAB_ATTR_RO(free_calls);
4550#endif /* CONFIG_SLUB_DEBUG */
4551
4552#ifdef CONFIG_FAILSLAB
4553static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4554{
4555 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4556}
4557
4558static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4559 size_t length)
4560{
4561 s->flags &= ~SLAB_FAILSLAB;
4562 if (buf[0] == '1')
4563 s->flags |= SLAB_FAILSLAB;
4564 return length;
4565}
4566SLAB_ATTR(failslab);
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004567#endif
Christoph Lameter53e15af2007-05-06 14:49:43 -07004568
Christoph Lameter2086d262007-05-06 14:49:46 -07004569static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4570{
4571 return 0;
4572}
4573
4574static ssize_t shrink_store(struct kmem_cache *s,
4575 const char *buf, size_t length)
4576{
4577 if (buf[0] == '1') {
4578 int rc = kmem_cache_shrink(s);
4579
4580 if (rc)
4581 return rc;
4582 } else
4583 return -EINVAL;
4584 return length;
4585}
4586SLAB_ATTR(shrink);
4587
Christoph Lameter81819f02007-05-06 14:49:36 -07004588#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08004589static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
Christoph Lameter81819f02007-05-06 14:49:36 -07004590{
Christoph Lameter98246012008-01-07 23:20:26 -08004591 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
Christoph Lameter81819f02007-05-06 14:49:36 -07004592}
4593
Christoph Lameter98246012008-01-07 23:20:26 -08004594static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07004595 const char *buf, size_t length)
4596{
Christoph Lameter0121c6192008-04-29 16:11:12 -07004597 unsigned long ratio;
4598 int err;
Christoph Lameter81819f02007-05-06 14:49:36 -07004599
Christoph Lameter0121c6192008-04-29 16:11:12 -07004600 err = strict_strtoul(buf, 10, &ratio);
4601 if (err)
4602 return err;
4603
Christoph Lametere2cb96b2008-08-19 08:51:22 -05004604 if (ratio <= 100)
Christoph Lameter0121c6192008-04-29 16:11:12 -07004605 s->remote_node_defrag_ratio = ratio * 10;
4606
Christoph Lameter81819f02007-05-06 14:49:36 -07004607 return length;
4608}
Christoph Lameter98246012008-01-07 23:20:26 -08004609SLAB_ATTR(remote_node_defrag_ratio);
Christoph Lameter81819f02007-05-06 14:49:36 -07004610#endif
4611
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004612#ifdef CONFIG_SLUB_STATS
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004613static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4614{
4615 unsigned long sum = 0;
4616 int cpu;
4617 int len;
4618 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4619
4620 if (!data)
4621 return -ENOMEM;
4622
4623 for_each_online_cpu(cpu) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06004624 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004625
4626 data[cpu] = x;
4627 sum += x;
4628 }
4629
4630 len = sprintf(buf, "%lu", sum);
4631
Christoph Lameter50ef37b2008-04-14 18:52:05 +03004632#ifdef CONFIG_SMP
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004633 for_each_online_cpu(cpu) {
4634 if (data[cpu] && len < PAGE_SIZE - 20)
Christoph Lameter50ef37b2008-04-14 18:52:05 +03004635 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004636 }
Christoph Lameter50ef37b2008-04-14 18:52:05 +03004637#endif
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004638 kfree(data);
4639 return len + sprintf(buf + len, "\n");
4640}
4641
David Rientjes78eb00c2009-10-15 02:20:22 -07004642static void clear_stat(struct kmem_cache *s, enum stat_item si)
4643{
4644 int cpu;
4645
4646 for_each_online_cpu(cpu)
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06004647 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
David Rientjes78eb00c2009-10-15 02:20:22 -07004648}
4649
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004650#define STAT_ATTR(si, text) \
4651static ssize_t text##_show(struct kmem_cache *s, char *buf) \
4652{ \
4653 return show_stat(s, buf, si); \
4654} \
David Rientjes78eb00c2009-10-15 02:20:22 -07004655static ssize_t text##_store(struct kmem_cache *s, \
4656 const char *buf, size_t length) \
4657{ \
4658 if (buf[0] != '0') \
4659 return -EINVAL; \
4660 clear_stat(s, si); \
4661 return length; \
4662} \
4663SLAB_ATTR(text); \
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004664
4665STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4666STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4667STAT_ATTR(FREE_FASTPATH, free_fastpath);
4668STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4669STAT_ATTR(FREE_FROZEN, free_frozen);
4670STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4671STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4672STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4673STAT_ATTR(ALLOC_SLAB, alloc_slab);
4674STAT_ATTR(ALLOC_REFILL, alloc_refill);
4675STAT_ATTR(FREE_SLAB, free_slab);
4676STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4677STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4678STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4679STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4680STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4681STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
Christoph Lameter65c33762008-04-14 19:11:40 +03004682STAT_ATTR(ORDER_FALLBACK, order_fallback);
Christoph Lameterb789ef52011-06-01 12:25:49 -05004683STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
4684STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004685#endif
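/*
 * With CONFIG_SLUB_STATS every counter above becomes a file under
 * /sys/kernel/slab/<cache>/. Reading it prints the sum followed by the
 * non-zero per cpu contributions (on SMP); writing '0' clears the counter.
 * Hypothetical example:
 *
 *	# cat /sys/kernel/slab/kmalloc-64/alloc_fastpath
 *	52109 C0=31047 C1=21062
 *	# echo 0 > /sys/kernel/slab/kmalloc-64/alloc_fastpath
 */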
4686
Pekka Enberg06428782008-01-07 23:20:27 -08004687static struct attribute *slab_attrs[] = {
Christoph Lameter81819f02007-05-06 14:49:36 -07004688 &slab_size_attr.attr,
4689 &object_size_attr.attr,
4690 &objs_per_slab_attr.attr,
4691 &order_attr.attr,
David Rientjes73d342b2009-02-22 17:40:09 -08004692 &min_partial_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07004693 &objects_attr.attr,
Christoph Lameter205ab992008-04-14 19:11:40 +03004694 &objects_partial_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07004695 &partial_attr.attr,
4696 &cpu_slabs_attr.attr,
4697 &ctor_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07004698 &aliases_attr.attr,
4699 &align_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07004700 &hwcache_align_attr.attr,
4701 &reclaim_account_attr.attr,
4702 &destroy_by_rcu_attr.attr,
Christoph Lametera5a84752010-10-05 13:57:27 -05004703 &shrink_attr.attr,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08004704 &reserved_attr.attr,
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004705#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05004706 &total_objects_attr.attr,
4707 &slabs_attr.attr,
4708 &sanity_checks_attr.attr,
4709 &trace_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07004710 &red_zone_attr.attr,
4711 &poison_attr.attr,
4712 &store_user_attr.attr,
Christoph Lameter53e15af2007-05-06 14:49:43 -07004713 &validate_attr.attr,
Christoph Lameter88a420e2007-05-06 14:49:45 -07004714 &alloc_calls_attr.attr,
4715 &free_calls_attr.attr,
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004716#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004717#ifdef CONFIG_ZONE_DMA
4718 &cache_dma_attr.attr,
4719#endif
4720#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08004721 &remote_node_defrag_ratio_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07004722#endif
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004723#ifdef CONFIG_SLUB_STATS
4724 &alloc_fastpath_attr.attr,
4725 &alloc_slowpath_attr.attr,
4726 &free_fastpath_attr.attr,
4727 &free_slowpath_attr.attr,
4728 &free_frozen_attr.attr,
4729 &free_add_partial_attr.attr,
4730 &free_remove_partial_attr.attr,
4731 &alloc_from_partial_attr.attr,
4732 &alloc_slab_attr.attr,
4733 &alloc_refill_attr.attr,
4734 &free_slab_attr.attr,
4735 &cpuslab_flush_attr.attr,
4736 &deactivate_full_attr.attr,
4737 &deactivate_empty_attr.attr,
4738 &deactivate_to_head_attr.attr,
4739 &deactivate_to_tail_attr.attr,
4740 &deactivate_remote_frees_attr.attr,
Christoph Lameter65c33762008-04-14 19:11:40 +03004741 &order_fallback_attr.attr,
Christoph Lameterb789ef52011-06-01 12:25:49 -05004742 &cmpxchg_double_fail_attr.attr,
4743 &cmpxchg_double_cpu_fail_attr.attr,
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08004744#endif
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +03004745#ifdef CONFIG_FAILSLAB
4746 &failslab_attr.attr,
4747#endif
4748
Christoph Lameter81819f02007-05-06 14:49:36 -07004749 NULL
4750};
4751
4752static struct attribute_group slab_attr_group = {
4753 .attrs = slab_attrs,
4754};
4755
4756static ssize_t slab_attr_show(struct kobject *kobj,
4757 struct attribute *attr,
4758 char *buf)
4759{
4760 struct slab_attribute *attribute;
4761 struct kmem_cache *s;
4762 int err;
4763
4764 attribute = to_slab_attr(attr);
4765 s = to_slab(kobj);
4766
4767 if (!attribute->show)
4768 return -EIO;
4769
4770 err = attribute->show(s, buf);
4771
4772 return err;
4773}
4774
4775static ssize_t slab_attr_store(struct kobject *kobj,
4776 struct attribute *attr,
4777 const char *buf, size_t len)
4778{
4779 struct slab_attribute *attribute;
4780 struct kmem_cache *s;
4781 int err;
4782
4783 attribute = to_slab_attr(attr);
4784 s = to_slab(kobj);
4785
4786 if (!attribute->store)
4787 return -EIO;
4788
4789 err = attribute->store(s, buf, len);
4790
4791 return err;
4792}
4793
Christoph Lameter151c6022008-01-07 22:29:05 -08004794static void kmem_cache_release(struct kobject *kobj)
4795{
4796 struct kmem_cache *s = to_slab(kobj);
4797
Pekka Enberg84c1cf62010-09-14 23:21:12 +03004798 kfree(s->name);
Christoph Lameter151c6022008-01-07 22:29:05 -08004799 kfree(s);
4800}
4801
Emese Revfy52cf25d2010-01-19 02:58:23 +01004802static const struct sysfs_ops slab_sysfs_ops = {
Christoph Lameter81819f02007-05-06 14:49:36 -07004803 .show = slab_attr_show,
4804 .store = slab_attr_store,
4805};
4806
4807static struct kobj_type slab_ktype = {
4808 .sysfs_ops = &slab_sysfs_ops,
Christoph Lameter151c6022008-01-07 22:29:05 -08004809 .release = kmem_cache_release
Christoph Lameter81819f02007-05-06 14:49:36 -07004810};
4811
4812static int uevent_filter(struct kset *kset, struct kobject *kobj)
4813{
4814 struct kobj_type *ktype = get_ktype(kobj);
4815
4816 if (ktype == &slab_ktype)
4817 return 1;
4818 return 0;
4819}
4820
Emese Revfy9cd43612009-12-31 14:52:51 +01004821static const struct kset_uevent_ops slab_uevent_ops = {
Christoph Lameter81819f02007-05-06 14:49:36 -07004822 .filter = uevent_filter,
4823};
4824
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06004825static struct kset *slab_kset;
Christoph Lameter81819f02007-05-06 14:49:36 -07004826
4827#define ID_STR_LENGTH 64
4828
4829/* Create a unique string id for a slab cache:
Christoph Lameter6446faa2008-02-15 23:45:26 -08004830 *
4831 * Format :[flags-]size
Christoph Lameter81819f02007-05-06 14:49:36 -07004832 */
4833static char *create_unique_id(struct kmem_cache *s)
4834{
4835 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4836 char *p = name;
4837
4838 BUG_ON(!name);
4839
4840 *p++ = ':';
4841 /*
4842 * First flags affecting slabcache operations. We will only
4843 * get here for aliasable slabs so we do not need to support
4844 * too many flags. The flags here must cover all flags that
4845 * are matched during merging to guarantee that the id is
4846 * unique.
4847 */
4848 if (s->flags & SLAB_CACHE_DMA)
4849 *p++ = 'd';
4850 if (s->flags & SLAB_RECLAIM_ACCOUNT)
4851 *p++ = 'a';
4852 if (s->flags & SLAB_DEBUG_FREE)
4853 *p++ = 'F';
Vegard Nossum5a896d92008-04-04 00:54:48 +02004854 if (!(s->flags & SLAB_NOTRACK))
4855 *p++ = 't';
Christoph Lameter81819f02007-05-06 14:49:36 -07004856 if (p != name + 1)
4857 *p++ = '-';
4858 p += sprintf(p, "%07d", s->size);
4859 BUG_ON(p > name + ID_STR_LENGTH - 1);
4860 return name;
4861}
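/*
 * Illustrative ids produced above: a mergeable 192 byte cache that is
 * tracked (no SLAB_NOTRACK) becomes ":t-0000192"; the same cache with
 * SLAB_CACHE_DMA set would be ":dt-0000192". sysfs_slab_add() below then
 * exposes the real cache names as symlinks to these unique directories
 * under /sys/kernel/slab/.
 */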
4862
4863static int sysfs_slab_add(struct kmem_cache *s)
4864{
4865 int err;
4866 const char *name;
4867 int unmergeable;
4868
4869 if (slab_state < SYSFS)
4870 /* Defer until later */
4871 return 0;
4872
4873 unmergeable = slab_unmergeable(s);
4874 if (unmergeable) {
4875 /*
4876 * Slabcache can never be merged so we can use the name proper.
4877 * This is typically the case for debug situations. In that
4878 * case we can catch duplicate names easily.
4879 */
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06004880 sysfs_remove_link(&slab_kset->kobj, s->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07004881 name = s->name;
4882 } else {
4883 /*
4884 * Create a unique name for the slab as a target
4885 * for the symlinks.
4886 */
4887 name = create_unique_id(s);
4888 }
4889
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06004890 s->kobj.kset = slab_kset;
Greg Kroah-Hartman1eada112007-12-17 23:05:35 -07004891 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4892 if (err) {
4893 kobject_put(&s->kobj);
Christoph Lameter81819f02007-05-06 14:49:36 -07004894 return err;
Greg Kroah-Hartman1eada112007-12-17 23:05:35 -07004895 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004896
4897 err = sysfs_create_group(&s->kobj, &slab_attr_group);
Xiaotian Feng5788d8a2009-07-22 11:28:53 +08004898 if (err) {
4899 kobject_del(&s->kobj);
4900 kobject_put(&s->kobj);
Christoph Lameter81819f02007-05-06 14:49:36 -07004901 return err;
Xiaotian Feng5788d8a2009-07-22 11:28:53 +08004902 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004903 kobject_uevent(&s->kobj, KOBJ_ADD);
4904 if (!unmergeable) {
4905 /* Setup first alias */
4906 sysfs_slab_alias(s, s->name);
4907 kfree(name);
4908 }
4909 return 0;
4910}
4911
4912static void sysfs_slab_remove(struct kmem_cache *s)
4913{
Christoph Lameter2bce6482010-07-19 11:39:11 -05004914 if (slab_state < SYSFS)
4915 /*
4916 * Sysfs has not been setup yet so no need to remove the
4917 * cache from sysfs.
4918 */
4919 return;
4920
Christoph Lameter81819f02007-05-06 14:49:36 -07004921 kobject_uevent(&s->kobj, KOBJ_REMOVE);
4922 kobject_del(&s->kobj);
Christoph Lameter151c6022008-01-07 22:29:05 -08004923 kobject_put(&s->kobj);
Christoph Lameter81819f02007-05-06 14:49:36 -07004924}
4925
4926/*
4927 * Need to buffer aliases during bootup until sysfs becomes
Nick Andrew9f6c708e2008-12-05 14:08:08 +11004928 * available lest we lose that information.
Christoph Lameter81819f02007-05-06 14:49:36 -07004929 */
4930struct saved_alias {
4931 struct kmem_cache *s;
4932 const char *name;
4933 struct saved_alias *next;
4934};
4935
Adrian Bunk5af328a2007-07-17 04:03:27 -07004936static struct saved_alias *alias_list;
Christoph Lameter81819f02007-05-06 14:49:36 -07004937
4938static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4939{
4940 struct saved_alias *al;
4941
4942 if (slab_state == SYSFS) {
4943 /*
4944 * If we have a leftover link then remove it.
4945 */
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06004946 sysfs_remove_link(&slab_kset->kobj, name);
4947 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
Christoph Lameter81819f02007-05-06 14:49:36 -07004948 }
4949
4950 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4951 if (!al)
4952 return -ENOMEM;
4953
4954 al->s = s;
4955 al->name = name;
4956 al->next = alias_list;
4957 alias_list = al;
4958 return 0;
4959}
4960
4961static int __init slab_sysfs_init(void)
4962{
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07004963 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004964 int err;
4965
Christoph Lameter2bce6482010-07-19 11:39:11 -05004966 down_write(&slub_lock);
4967
Greg Kroah-Hartman0ff21e42007-11-06 10:36:58 -08004968 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06004969 if (!slab_kset) {
Christoph Lameter2bce6482010-07-19 11:39:11 -05004970 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07004971 printk(KERN_ERR "Cannot register slab subsystem.\n");
4972 return -ENOSYS;
4973 }
4974
Christoph Lameter26a7bd02007-05-09 02:32:39 -07004975 slab_state = SYSFS;
4976
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07004977 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter26a7bd02007-05-09 02:32:39 -07004978 err = sysfs_slab_add(s);
Christoph Lameter5d540fb2007-08-30 23:56:26 -07004979 if (err)
4980 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4981 " to sysfs\n", s->name);
Christoph Lameter26a7bd02007-05-09 02:32:39 -07004982 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004983
4984 while (alias_list) {
4985 struct saved_alias *al = alias_list;
4986
4987 alias_list = alias_list->next;
4988 err = sysfs_slab_alias(al->s, al->name);
Christoph Lameter5d540fb2007-08-30 23:56:26 -07004989 if (err)
4990 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
                                              4991				" %s to sysfs\n", al->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07004992 kfree(al);
4993 }
4994
Christoph Lameter2bce6482010-07-19 11:39:11 -05004995 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07004996 resiliency_test();
4997 return 0;
4998}
4999
5000__initcall(slab_sysfs_init);
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005001#endif /* CONFIG_SYSFS */
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005002
5003/*
5004 * The /proc/slabinfo ABI
5005 */
Linus Torvalds158a9622008-01-02 13:04:48 -08005006#ifdef CONFIG_SLABINFO
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005007static void print_slabinfo_header(struct seq_file *m)
5008{
5009 seq_puts(m, "slabinfo - version: 2.1\n");
5010 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
5011 "<objperslab> <pagesperslab>");
5012 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
5013 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
5014 seq_putc(m, '\n');
5015}
5016
5017static void *s_start(struct seq_file *m, loff_t *pos)
5018{
5019 loff_t n = *pos;
5020
5021 down_read(&slub_lock);
5022 if (!n)
5023 print_slabinfo_header(m);
5024
5025 return seq_list_start(&slab_caches, *pos);
5026}
5027
5028static void *s_next(struct seq_file *m, void *p, loff_t *pos)
5029{
5030 return seq_list_next(p, &slab_caches, pos);
5031}
5032
5033static void s_stop(struct seq_file *m, void *p)
5034{
5035 up_read(&slub_lock);
5036}
5037
5038static int s_show(struct seq_file *m, void *p)
5039{
5040 unsigned long nr_partials = 0;
5041 unsigned long nr_slabs = 0;
5042 unsigned long nr_inuse = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03005043 unsigned long nr_objs = 0;
5044 unsigned long nr_free = 0;
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005045 struct kmem_cache *s;
5046 int node;
5047
5048 s = list_entry(p, struct kmem_cache, list);
5049
5050 for_each_online_node(node) {
5051 struct kmem_cache_node *n = get_node(s, node);
5052
5053 if (!n)
5054 continue;
5055
5056 nr_partials += n->nr_partial;
5057 nr_slabs += atomic_long_read(&n->nr_slabs);
Christoph Lameter205ab992008-04-14 19:11:40 +03005058 nr_objs += atomic_long_read(&n->total_objects);
5059 nr_free += count_partial(n, count_free);
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005060 }
5061
Christoph Lameter205ab992008-04-14 19:11:40 +03005062 nr_inuse = nr_objs - nr_free;
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005063
5064 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
Christoph Lameter834f3d12008-04-14 19:11:31 +03005065 nr_objs, s->size, oo_objects(s->oo),
5066 (1 << oo_order(s->oo)));
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005067 seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
5068 seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
5069 0UL);
5070 seq_putc(m, '\n');
5071 return 0;
5072}
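/*
 * A resulting /proc/slabinfo line for a hypothetical kmalloc-64 cache with
 * 32 fully used order-0 slabs of 64 objects each would read:
 *
 * kmalloc-64  2048  2048  64  64  1 : tunables 0 0 0 : slabdata 32 32 0
 *
 * SLUB ignores the SLAB tunables (hence the zeroes) and always reports
 * <active_slabs> equal to <num_slabs>.
 */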
5073
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04005074static const struct seq_operations slabinfo_op = {
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005075 .start = s_start,
5076 .next = s_next,
5077 .stop = s_stop,
5078 .show = s_show,
5079};
5080
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04005081static int slabinfo_open(struct inode *inode, struct file *file)
5082{
5083 return seq_open(file, &slabinfo_op);
5084}
5085
5086static const struct file_operations proc_slabinfo_operations = {
5087 .open = slabinfo_open,
5088 .read = seq_read,
5089 .llseek = seq_lseek,
5090 .release = seq_release,
5091};
5092
5093static int __init slab_proc_init(void)
5094{
WANG Congcf5d1132009-08-18 19:11:40 +03005095 proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04005096 return 0;
5097}
5098module_init(slab_proc_init);
Linus Torvalds158a9622008-01-02 13:04:48 -08005099#endif /* CONFIG_SLABINFO */