/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>

#include <trace/events/kmem.h>
36
Christoph Lameter81819f02007-05-06 14:49:36 -070037/*
38 * Lock order:
Christoph Lameter18004c52012-07-06 15:25:12 -050039 * 1. slab_mutex (Global Mutex)
Christoph Lameter881db7f2011-06-01 12:25:53 -050040 * 2. node->list_lock
41 * 3. slab_lock(page) (Only on some arches and for debugging)
Christoph Lameter81819f02007-05-06 14:49:36 -070042 *
Christoph Lameter18004c52012-07-06 15:25:12 -050043 * slab_mutex
Christoph Lameter881db7f2011-06-01 12:25:53 -050044 *
Christoph Lameter18004c52012-07-06 15:25:12 -050045 * The role of the slab_mutex is to protect the list of all the slabs
Christoph Lameter881db7f2011-06-01 12:25:53 -050046 * and to synchronize major metadata changes to slab cache structures.
47 *
48 * The slab_lock is only used for debugging and on arches that do not
49 * have the ability to do a cmpxchg_double. It only protects the second
50 * double word in the page struct. Meaning
51 * A. page->freelist -> List of object free in a page
52 * B. page->counters -> Counters of objects
53 * C. page->frozen -> frozen state
54 *
55 * If a slab is frozen then it is exempt from list management. It is not
56 * on any list. The processor that froze the slab is the one who can
57 * perform list operations on the page. Other processors may put objects
58 * onto the freelist but the processor that froze the slab is the only
59 * one that can retrieve the objects from the page's freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -070060 *
61 * The list_lock protects the partial and full list on each node and
62 * the partial slab counter. If taken then no new slabs may be added or
63 * removed from the lists nor make the number of partial slabs be modified.
64 * (Note that the total number of slabs is an atomic value that may be
65 * modified without taking the list lock).
66 *
67 * The list_lock is a centralized lock and thus we avoid taking it as
68 * much as possible. As long as SLUB does not have to handle partial
69 * slabs, operations can continue without any centralized lock. F.e.
70 * allocating a long series of objects that fill up slabs does not require
71 * the list lock.
Christoph Lameter81819f02007-05-06 14:49:36 -070072 * Interrupts are disabled during allocation and deallocation in order to
73 * make the slab allocator safe to use in the context of an irq. In addition
74 * interrupts are disabled to ensure that the processor does not change
75 * while handling per_cpu slabs, due to kernel preemption.
76 *
77 * SLUB assigns one slab for allocation to each processor.
78 * Allocations only occur from these slabs called cpu slabs.
79 *
Christoph Lameter672bba32007-05-09 02:32:39 -070080 * Slabs with free elements are kept on a partial list and during regular
81 * operations no list for full slabs is used. If an object in a full slab is
Christoph Lameter81819f02007-05-06 14:49:36 -070082 * freed then the slab will show up again on the partial lists.
Christoph Lameter672bba32007-05-09 02:32:39 -070083 * We track full slabs for debugging purposes though because otherwise we
84 * cannot scan all objects.
Christoph Lameter81819f02007-05-06 14:49:36 -070085 *
86 * Slabs are freed when they become empty. Teardown and setup is
87 * minimal so we rely on the page allocators per cpu caches for
88 * fast frees and allocs.
89 *
90 * Overloading of page flags that are otherwise used for LRU management.
91 *
Christoph Lameter4b6f0752007-05-16 22:10:53 -070092 * PageActive The slab is frozen and exempt from list processing.
93 * This means that the slab is dedicated to a purpose
94 * such as satisfying allocations for a specific
95 * processor. Objects may be freed in the slab while
96 * it is frozen but slab_free will then skip the usual
97 * list operations. It is up to the processor holding
98 * the slab to integrate the slab into the slab lists
99 * when the slab is no longer needed.
100 *
101 * One use of this flag is to mark slabs that are
102 * used for allocations. Then such a slab becomes a cpu
103 * slab. The cpu slab may be equipped with an additional
Christoph Lameterdfb4f092007-10-16 01:26:05 -0700104 * freelist that allows lockless access to
Christoph Lameter894b8782007-05-10 03:15:16 -0700105 * free objects in addition to the regular freelist
106 * that requires the slab lock.
Christoph Lameter81819f02007-05-06 14:49:36 -0700107 *
108 * PageError Slab requires special handling due to debug
109 * options set. This moves slab handling out of
Christoph Lameter894b8782007-05-10 03:15:16 -0700110 * the fast path and disables lockless freelists.
Christoph Lameter81819f02007-05-06 14:49:36 -0700111 */

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
	kfree(s->name);
	kfree(s);
}

#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

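/*
 * The free pointer of a free object is stored inside the object itself,
 * at offset s->offset. When s->offset is 0 it overlays the first word of
 * the (unused) object; see the object layout comment further down.
 */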
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

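/*
 * Variant of get_freepointer() for lockless fastpath lookups. With
 * CONFIG_DEBUG_PAGEALLOC the slab page may already have been unmapped by
 * the time we look at it, so read the free pointer with probe_kernel_read()
 * rather than a plain dereference.
 */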
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	void *p;

#ifdef CONFIG_DEBUG_PAGEALLOC
	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
#else
	p = get_freepointer(s, object);
#endif
	return p;
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
			__p += (__s)->size)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;

#endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}

static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

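/*
 * Example of the order/objects encoding above (assuming 4K pages):
 * oo_make(3, 64, 0) computes order_objects(3, 64, 0) = (4096 << 3) / 64 = 512
 * and packs both values into one word as (3 << OO_SHIFT) + 512, so that
 * oo_order() and oo_objects() can later extract 3 and 512 again.
 */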
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	__bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
			freelist_old, counters_old,
			freelist_new, counters_new))
		return 1;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old && page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return 1;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif

	return 0;
}

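/*
 * Same as __cmpxchg_double_slab() except that the lock-based fallback
 * disables interrupts itself, so this version may be called with
 * interrupts enabled.
 */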
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
			freelist_old, counters_old,
			freelist_new, counters_new))
		return 1;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old && page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return 1;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif

	return 0;
}

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * The node's list_lock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}

/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		save_stack_trace(&trace);

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
		page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));

	if (p > addr + 16)
		print_section("Bytes b4 ", p - 16, 16);

	print_section("Object ", p, min_t(unsigned long, s->object_size,
				PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section("Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (off != s->size)
		/* Beginning of the filler is the free pointer */
		print_section("Padding ", p + off, s->size - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

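/*
 * Scan [start, start + bytes) for any byte that differs from the expected
 * value. On a mismatch the corrupted range is reported and then restored
 * via restore_bytes(), so a single corruption does not produce an endless
 * stream of reports.
 */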
static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	fault = memchr_inv(start, value, bytes);
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 * 	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding ", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	unsigned long max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but "
			"should be %d", page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object ", (void *)object, s->object_size);

		dump_stack();
	}
}

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(flags & __GFP_WAIT);

	return should_failslab(s->object_size, flags, s->flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
	flags &= gfp_allowed_mask;
	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast
	 * path, so in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
	{
		unsigned long flags;

		local_irq_save(flags);
		kmemcheck_slab_free(s, x, s->object_size);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 *
 * list_lock must be held.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	list_add(&page->lru, &n->full);
}

/*
 * list_lock must be held.
 */
static void remove_full(struct kmem_cache *s, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (n) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		goto bad;

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static noinline int free_debug_processing(struct kmem_cache *s,
		 struct page *page, void *object, unsigned long addr)
{
	unsigned long flags;
	int rc = 0;

	local_irq_save(flags);
	slab_lock(page);

	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		goto out;

	if (unlikely(s != page->slab)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		} else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, SLUB_RED_INACTIVE);
	rc = 1;
out:
	slab_unlock(page);
	local_irq_restore(flags);
	return rc;

fail:
	slab_fix(s, "Object at 0x%p not freed", object);
	goto out;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	if (tolower(*str) == 'o') {
		/*
		 * Avoid enabling debugging on caches if their minimum order
		 * would increase as a result.
		 */
		disable_higher_order_debug = 1;
		goto out;
	}

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		case 'a':
			slub_debug |= SLAB_FAILSLAB;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);

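/*
 * Example: booting with slub_debug=FZP,dentry enables free-path sanity
 * checks (F, SLAB_DEBUG_FREE), red zoning (Z) and poisoning (P) only for
 * caches whose name begins with "dentry", while a bare slub_debug enables
 * DEBUG_DEFAULT_FLAGS for all caches. The flags selected here are merged
 * into each matching cache's flags by kmem_cache_flags() below.
 */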
static unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs ||
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
		flags |= slub_debug;

	return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
			{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
			{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
							{ return 0; }

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
		void *object) {}

static inline void slab_free_hook(struct kmem_cache *s, void *x) {}

#endif /* CONFIG_SLUB_DEBUG */

Christoph Lameter81819f02007-05-06 14:49:36 -07001254/*
1255 * Slab allocation and freeing
1256 */
Christoph Lameter65c33762008-04-14 19:11:40 +03001257static inline struct page *alloc_slab_page(gfp_t flags, int node,
1258 struct kmem_cache_order_objects oo)
1259{
1260 int order = oo_order(oo);
1261
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001262 flags |= __GFP_NOTRACK;
1263
Christoph Lameter2154a332010-07-09 14:07:10 -05001264 if (node == NUMA_NO_NODE)
Christoph Lameter65c33762008-04-14 19:11:40 +03001265 return alloc_pages(flags, order);
1266 else
Minchan Kim6b65aaf2010-04-14 23:58:36 +09001267 return alloc_pages_exact_node(node, flags, order);
Christoph Lameter65c33762008-04-14 19:11:40 +03001268}
1269
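/*
 * Allocate the backing pages for a new slab. allocate_slab() first tries
 * the preferred order (s->oo) and falls back to the minimum order (s->min)
 * if that allocation fails under memory pressure.
 */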
Christoph Lameter81819f02007-05-06 14:49:36 -07001270static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1271{
Pekka Enberg06428782008-01-07 23:20:27 -08001272 struct page *page;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001273 struct kmem_cache_order_objects oo = s->oo;
Pekka Enbergba522702009-06-24 21:59:51 +03001274 gfp_t alloc_gfp;
Christoph Lameter81819f02007-05-06 14:49:36 -07001275
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001276 flags &= gfp_allowed_mask;
1277
1278 if (flags & __GFP_WAIT)
1279 local_irq_enable();
1280
Christoph Lameterb7a49f02008-02-14 14:21:32 -08001281 flags |= s->allocflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001282
Pekka Enbergba522702009-06-24 21:59:51 +03001283 /*
1284 * Let the initial higher-order allocation fail under memory pressure
1285	 * so we fall back to the minimum order allocation.
1286 */
1287 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1288
1289 page = alloc_slab_page(alloc_gfp, node, oo);
Christoph Lameter65c33762008-04-14 19:11:40 +03001290 if (unlikely(!page)) {
1291 oo = s->min;
1292 /*
1293 * Allocation may have failed due to fragmentation.
1294 * Try a lower order alloc if possible
1295 */
1296 page = alloc_slab_page(flags, node, oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001297
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001298 if (page)
1299 stat(s, ORDER_FALLBACK);
Christoph Lameter65c33762008-04-14 19:11:40 +03001300 }
Vegard Nossum5a896d92008-04-04 00:54:48 +02001301
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001302 if (flags & __GFP_WAIT)
1303 local_irq_disable();
1304
1305 if (!page)
1306 return NULL;
1307
Vegard Nossum5a896d92008-04-04 00:54:48 +02001308 if (kmemcheck_enabled
Amerigo Wang5086c389c2009-08-19 21:44:13 +03001309 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001310 int pages = 1 << oo_order(oo);
1311
1312 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1313
1314 /*
1315 * Objects from caches that have a constructor don't get
1316 * cleared when they're allocated, so we need to do it here.
1317 */
1318 if (s->ctor)
1319 kmemcheck_mark_uninitialized_pages(page, pages);
1320 else
1321 kmemcheck_mark_unallocated_pages(page, pages);
Vegard Nossum5a896d92008-04-04 00:54:48 +02001322 }
1323
Christoph Lameter834f3d12008-04-14 19:11:31 +03001324 page->objects = oo_objects(oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001325 mod_zone_page_state(page_zone(page),
1326 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1327 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Christoph Lameter65c33762008-04-14 19:11:40 +03001328 1 << oo_order(oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07001329
1330 return page;
1331}
1332
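/*
 * Initialize a single object: set up the debug metadata and then run the
 * cache constructor if one was provided.
 */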
1333static void setup_object(struct kmem_cache *s, struct page *page,
1334 void *object)
1335{
Christoph Lameter3ec09742007-05-16 22:11:00 -07001336 setup_object_debug(s, page, object);
Christoph Lameter4f104932007-05-06 14:50:17 -07001337 if (unlikely(s->ctor))
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07001338 s->ctor(object);
Christoph Lameter81819f02007-05-06 14:49:36 -07001339}
1340
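/*
 * Create a new slab: allocate and (if requested) poison the pages, then
 * link every object into the initial freelist. The slab is returned in
 * frozen state with all objects accounted as in use.
 */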
1341static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1342{
1343 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07001344 void *start;
Christoph Lameter81819f02007-05-06 14:49:36 -07001345 void *last;
1346 void *p;
1347
Christoph Lameter6cb06222007-10-16 01:25:41 -07001348 BUG_ON(flags & GFP_SLAB_BUG_MASK);
Christoph Lameter81819f02007-05-06 14:49:36 -07001349
Christoph Lameter6cb06222007-10-16 01:25:41 -07001350 page = allocate_slab(s,
1351 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
Christoph Lameter81819f02007-05-06 14:49:36 -07001352 if (!page)
1353 goto out;
1354
Christoph Lameter205ab992008-04-14 19:11:40 +03001355 inc_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001356 page->slab = s;
Joonsoo Kimc03f94c2012-05-18 00:47:47 +09001357 __SetPageSlab(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001358
1359 start = page_address(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001360
1361 if (unlikely(s->flags & SLAB_POISON))
Christoph Lameter834f3d12008-04-14 19:11:31 +03001362 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
Christoph Lameter81819f02007-05-06 14:49:36 -07001363
1364 last = start;
Christoph Lameter224a88b2008-04-14 19:11:31 +03001365 for_each_object(p, s, start, page->objects) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001366 setup_object(s, page, last);
1367 set_freepointer(s, last, p);
1368 last = p;
1369 }
1370 setup_object(s, page, last);
Christoph Lametera973e9d2008-03-01 13:40:44 -08001371 set_freepointer(s, last, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07001372
1373 page->freelist = start;
Christoph Lametere6e82ea2011-08-09 16:12:24 -05001374 page->inuse = page->objects;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05001375 page->frozen = 1;
Christoph Lameter81819f02007-05-06 14:49:36 -07001376out:
Christoph Lameter81819f02007-05-06 14:49:36 -07001377 return page;
1378}
1379
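/*
 * Return a slab to the page allocator: run the debug checks on every
 * object, release the kmemcheck shadow, adjust the per zone slab counters
 * and credit the reclaim state before freeing the pages.
 */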
1380static void __free_slab(struct kmem_cache *s, struct page *page)
1381{
Christoph Lameter834f3d12008-04-14 19:11:31 +03001382 int order = compound_order(page);
1383 int pages = 1 << order;
Christoph Lameter81819f02007-05-06 14:49:36 -07001384
Christoph Lameteraf537b02010-07-09 14:07:14 -05001385 if (kmem_cache_debug(s)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001386 void *p;
1387
1388 slab_pad_check(s, page);
Christoph Lameter224a88b2008-04-14 19:11:31 +03001389 for_each_object(p, s, page_address(page),
1390 page->objects)
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001391 check_object(s, page, p, SLUB_RED_INACTIVE);
Christoph Lameter81819f02007-05-06 14:49:36 -07001392 }
1393
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001394 kmemcheck_free_shadow(page, compound_order(page));
Vegard Nossum5a896d92008-04-04 00:54:48 +02001395
Christoph Lameter81819f02007-05-06 14:49:36 -07001396 mod_zone_page_state(page_zone(page),
1397 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1398 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Pekka Enberg06428782008-01-07 23:20:27 -08001399 -pages);
Christoph Lameter81819f02007-05-06 14:49:36 -07001400
Christoph Lameter49bd5222008-04-14 18:52:18 +03001401 __ClearPageSlab(page);
1402 reset_page_mapcount(page);
Nick Piggin1eb5ac62009-05-05 19:13:44 +10001403 if (current->reclaim_state)
1404 current->reclaim_state->reclaimed_slab += pages;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001405 __free_pages(page, order);
Christoph Lameter81819f02007-05-06 14:49:36 -07001406}
1407
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001408#define need_reserve_slab_rcu \
1409 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1410
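/*
 * RCU callback: recover the page from the rcu_head, which lives either in
 * the unused lru field or in the reserved space at the end of the slab,
 * and free it.
 */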
Christoph Lameter81819f02007-05-06 14:49:36 -07001411static void rcu_free_slab(struct rcu_head *h)
1412{
1413 struct page *page;
1414
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001415 if (need_reserve_slab_rcu)
1416 page = virt_to_head_page(h);
1417 else
1418 page = container_of((struct list_head *)h, struct page, lru);
1419
Christoph Lameter81819f02007-05-06 14:49:36 -07001420 __free_slab(page->slab, page);
1421}
1422
1423static void free_slab(struct kmem_cache *s, struct page *page)
1424{
1425 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001426 struct rcu_head *head;
1427
1428 if (need_reserve_slab_rcu) {
1429 int order = compound_order(page);
1430 int offset = (PAGE_SIZE << order) - s->reserved;
1431
1432 VM_BUG_ON(s->reserved != sizeof(*head));
1433 head = page_address(page) + offset;
1434 } else {
1435 /*
1436 * RCU free overloads the RCU head over the LRU
1437 */
1438 head = (void *)&page->lru;
1439 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001440
1441 call_rcu(head, rcu_free_slab);
1442 } else
1443 __free_slab(s, page);
1444}
1445
1446static void discard_slab(struct kmem_cache *s, struct page *page)
1447{
Christoph Lameter205ab992008-04-14 19:11:40 +03001448 dec_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001449 free_slab(s, page);
1450}
1451
1452/*
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001453 * Management of partially allocated slabs.
1454 *
1455 * list_lock must be held.
Christoph Lameter81819f02007-05-06 14:49:36 -07001456 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001457static inline void add_partial(struct kmem_cache_node *n,
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001458 struct page *page, int tail)
Christoph Lameter81819f02007-05-06 14:49:36 -07001459{
Christoph Lametere95eed52007-05-06 14:49:44 -07001460 n->nr_partial++;
Shaohua Li136333d2011-08-24 08:57:52 +08001461 if (tail == DEACTIVATE_TO_TAIL)
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001462 list_add_tail(&page->lru, &n->partial);
1463 else
1464 list_add(&page->lru, &n->partial);
Christoph Lameter81819f02007-05-06 14:49:36 -07001465}
1466
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001467/*
1468 * list_lock must be held.
1469 */
1470static inline void remove_partial(struct kmem_cache_node *n,
Christoph Lameter62e346a2010-09-28 08:10:28 -05001471 struct page *page)
1472{
1473 list_del(&page->lru);
1474 n->nr_partial--;
1475}
1476
Christoph Lameter81819f02007-05-06 14:49:36 -07001477/*
Christoph Lameter7ced3712012-05-09 10:09:53 -05001478 * Remove slab from the partial list, freeze it and
1479 * return the pointer to the freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07001480 *
Christoph Lameter497b66f2011-08-09 16:12:26 -05001481 * Returns a list of objects or NULL if it fails.
1482 *
Christoph Lameter7ced3712012-05-09 10:09:53 -05001483 * Must hold list_lock since we modify the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07001484 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001485static inline void *acquire_slab(struct kmem_cache *s,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001486 struct kmem_cache_node *n, struct page *page,
Christoph Lameter49e22582011-08-09 16:12:27 -05001487 int mode)
Christoph Lameter81819f02007-05-06 14:49:36 -07001488{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001489 void *freelist;
1490 unsigned long counters;
1491 struct page new;
1492
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001493 /*
1494 * Zap the freelist and set the frozen bit.
1495 * The old freelist is the list of objects for the
1496 * per cpu allocation list.
1497 */
Christoph Lameter7ced3712012-05-09 10:09:53 -05001498 freelist = page->freelist;
1499 counters = page->counters;
1500 new.counters = counters;
Pekka Enberg23910c52012-06-04 10:14:58 +03001501 if (mode) {
Christoph Lameter7ced3712012-05-09 10:09:53 -05001502 new.inuse = page->objects;
Pekka Enberg23910c52012-06-04 10:14:58 +03001503 new.freelist = NULL;
1504 } else {
1505 new.freelist = freelist;
1506 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001507
Christoph Lameter7ced3712012-05-09 10:09:53 -05001508 VM_BUG_ON(new.frozen);
1509 new.frozen = 1;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001510
Christoph Lameter7ced3712012-05-09 10:09:53 -05001511 if (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001512 freelist, counters,
Joonsoo Kim02d76332012-05-17 00:13:02 +09001513 new.freelist, new.counters,
Christoph Lameter7ced3712012-05-09 10:09:53 -05001514 "acquire_slab"))
Christoph Lameter7ced3712012-05-09 10:09:53 -05001515 return NULL;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001516
1517 remove_partial(n, page);
Christoph Lameter7ced3712012-05-09 10:09:53 -05001518 WARN_ON(!freelist);
Christoph Lameter49e22582011-08-09 16:12:27 -05001519 return freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07001520}
1521
Christoph Lameter49e22582011-08-09 16:12:27 -05001522static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1523
Christoph Lameter81819f02007-05-06 14:49:36 -07001524/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001525 * Try to allocate a partial slab from a specific node.
Christoph Lameter81819f02007-05-06 14:49:36 -07001526 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001527static void *get_partial_node(struct kmem_cache *s,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001528 struct kmem_cache_node *n, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001529{
Christoph Lameter49e22582011-08-09 16:12:27 -05001530 struct page *page, *page2;
1531 void *object = NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001532
1533 /*
1534 * Racy check. If we mistakenly see no partial slabs then we
1535 * just allocate an empty slab. If we mistakenly try to get a
Christoph Lameter672bba32007-05-09 02:32:39 -07001536	 * partial slab and there is none available then this function
1537 * will return NULL.
Christoph Lameter81819f02007-05-06 14:49:36 -07001538 */
1539 if (!n || !n->nr_partial)
1540 return NULL;
1541
1542 spin_lock(&n->list_lock);
Christoph Lameter49e22582011-08-09 16:12:27 -05001543 list_for_each_entry_safe(page, page2, &n->partial, lru) {
Alex,Shi12d79632011-09-07 10:26:36 +08001544 void *t = acquire_slab(s, n, page, object == NULL);
Christoph Lameter49e22582011-08-09 16:12:27 -05001545 int available;
1546
1547 if (!t)
1548 break;
1549
Alex,Shi12d79632011-09-07 10:26:36 +08001550 if (!object) {
Christoph Lameter49e22582011-08-09 16:12:27 -05001551 c->page = page;
Christoph Lameter49e22582011-08-09 16:12:27 -05001552 stat(s, ALLOC_FROM_PARTIAL);
Christoph Lameter49e22582011-08-09 16:12:27 -05001553 object = t;
1554 available = page->objects - page->inuse;
1555 } else {
Christoph Lameter49e22582011-08-09 16:12:27 -05001556 available = put_cpu_partial(s, page, 0);
Alex Shi8028dce2012-02-03 23:34:56 +08001557 stat(s, CPU_PARTIAL_NODE);
Christoph Lameter49e22582011-08-09 16:12:27 -05001558 }
1559 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1560 break;
1561
Christoph Lameter497b66f2011-08-09 16:12:26 -05001562 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001563 spin_unlock(&n->list_lock);
Christoph Lameter497b66f2011-08-09 16:12:26 -05001564 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07001565}
1566
1567/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001568 * Get a page from somewhere. Search in increasing NUMA distances.
Christoph Lameter81819f02007-05-06 14:49:36 -07001569 */
Joonsoo Kimde3ec032012-01-27 00:12:23 -08001570static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001571 struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001572{
1573#ifdef CONFIG_NUMA
1574 struct zonelist *zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07001575 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07001576 struct zone *zone;
1577 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter497b66f2011-08-09 16:12:26 -05001578 void *object;
Mel Gormancc9a6c82012-03-21 16:34:11 -07001579 unsigned int cpuset_mems_cookie;
Christoph Lameter81819f02007-05-06 14:49:36 -07001580
1581 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07001582 * The defrag ratio allows a configuration of the tradeoffs between
1583 * inter node defragmentation and node local allocations. A lower
1584 * defrag_ratio increases the tendency to do local allocations
1585 * instead of attempting to obtain partial slabs from other nodes.
Christoph Lameter81819f02007-05-06 14:49:36 -07001586 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001587 * If the defrag_ratio is set to 0 then kmalloc() always
1588 * returns node local objects. If the ratio is higher then kmalloc()
1589 * may return off node objects because partial slabs are obtained
1590 * from other nodes and filled up.
Christoph Lameter81819f02007-05-06 14:49:36 -07001591 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08001592 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
Christoph Lameter672bba32007-05-09 02:32:39 -07001593 * defrag_ratio = 1000) then every (well almost) allocation will
1594 * first attempt to defrag slab caches on other nodes. This means
1595 * scanning over all nodes to look for partial slabs which may be
1596 * expensive if we do it every time we are trying to find a slab
1597 * with available objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07001598 */
Christoph Lameter98246012008-01-07 23:20:26 -08001599 if (!s->remote_node_defrag_ratio ||
1600 get_cycles() % 1024 > s->remote_node_defrag_ratio)
Christoph Lameter81819f02007-05-06 14:49:36 -07001601 return NULL;
1602
Mel Gormancc9a6c82012-03-21 16:34:11 -07001603 do {
1604 cpuset_mems_cookie = get_mems_allowed();
Andi Kleene7b691b2012-06-09 02:40:03 -07001605 zonelist = node_zonelist(slab_node(), flags);
Mel Gormancc9a6c82012-03-21 16:34:11 -07001606 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1607 struct kmem_cache_node *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07001608
Mel Gormancc9a6c82012-03-21 16:34:11 -07001609 n = get_node(s, zone_to_nid(zone));
Christoph Lameter81819f02007-05-06 14:49:36 -07001610
Mel Gormancc9a6c82012-03-21 16:34:11 -07001611 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1612 n->nr_partial > s->min_partial) {
1613 object = get_partial_node(s, n, c);
1614 if (object) {
1615 /*
1616 * Return the object even if
1617 * put_mems_allowed indicated that
1618 * the cpuset mems_allowed was
1619 * updated in parallel. It's a
1620 * harmless race between the alloc
1621 * and the cpuset update.
1622 */
1623 put_mems_allowed(cpuset_mems_cookie);
1624 return object;
1625 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001626 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001627 }
Mel Gormancc9a6c82012-03-21 16:34:11 -07001628 } while (!put_mems_allowed(cpuset_mems_cookie));
Christoph Lameter81819f02007-05-06 14:49:36 -07001629#endif
1630 return NULL;
1631}
1632
1633/*
1634 * Get a partial page, lock it and return it.
1635 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001636static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001637 struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001638{
Christoph Lameter497b66f2011-08-09 16:12:26 -05001639 void *object;
Christoph Lameter2154a332010-07-09 14:07:10 -05001640 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
Christoph Lameter81819f02007-05-06 14:49:36 -07001641
Christoph Lameter497b66f2011-08-09 16:12:26 -05001642 object = get_partial_node(s, get_node(s, searchnode), c);
1643 if (object || node != NUMA_NO_NODE)
1644 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07001645
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001646 return get_any_partial(s, flags, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001647}
1648
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001649#ifdef CONFIG_PREEMPT
1650/*
1651 * Calculate the next globally unique transaction for disambiguation
1652 * during cmpxchg. The transactions start with the cpu number and are then
1653 * incremented by CONFIG_NR_CPUS.
1654 */
1655#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1656#else
1657/*
1658 * No preemption supported therefore also no need to check for
1659 * different cpus.
1660 */
1661#define TID_STEP 1
1662#endif
1663
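/*
 * Helpers for the per cpu transaction id. A tid carries the cpu number in
 * its low bits (when preemption is possible) and an event counter above
 * them; every operation advances the tid by TID_STEP.
 */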
1664static inline unsigned long next_tid(unsigned long tid)
1665{
1666 return tid + TID_STEP;
1667}
1668
1669static inline unsigned int tid_to_cpu(unsigned long tid)
1670{
1671 return tid % TID_STEP;
1672}
1673
1674static inline unsigned long tid_to_event(unsigned long tid)
1675{
1676 return tid / TID_STEP;
1677}
1678
1679static inline unsigned int init_tid(int cpu)
1680{
1681 return cpu;
1682}
1683
1684static inline void note_cmpxchg_failure(const char *n,
1685 const struct kmem_cache *s, unsigned long tid)
1686{
1687#ifdef SLUB_DEBUG_CMPXCHG
1688 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1689
1690 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1691
1692#ifdef CONFIG_PREEMPT
1693 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1694 printk("due to cpu change %d -> %d\n",
1695 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1696 else
1697#endif
1698 if (tid_to_event(tid) != tid_to_event(actual_tid))
1699 printk("due to cpu running other code. Event %ld->%ld\n",
1700 tid_to_event(tid), tid_to_event(actual_tid));
1701 else
1702 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1703 actual_tid, tid, next_tid(tid));
1704#endif
Christoph Lameter4fdccdf2011-03-22 13:35:00 -05001705 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001706}
1707
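/*
 * Seed the transaction id of every possible cpu with its cpu number
 * (see init_tid() and TID_STEP above).
 */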
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001708void init_kmem_cache_cpus(struct kmem_cache *s)
1709{
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001710 int cpu;
1711
1712 for_each_possible_cpu(cpu)
1713 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001714}
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001715
1716/*
1717 * Remove the cpu slab
1718 */
Christoph Lameterc17dda42012-05-09 10:09:57 -05001719static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
Christoph Lameter81819f02007-05-06 14:49:36 -07001720{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001721 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001722 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1723 int lock = 0;
1724 enum slab_modes l = M_NONE, m = M_NONE;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001725 void *nextfree;
Shaohua Li136333d2011-08-24 08:57:52 +08001726 int tail = DEACTIVATE_TO_HEAD;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001727 struct page new;
1728 struct page old;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001729
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001730 if (page->freelist) {
Christoph Lameter84e554e62009-12-18 16:26:23 -06001731 stat(s, DEACTIVATE_REMOTE_FREES);
Shaohua Li136333d2011-08-24 08:57:52 +08001732 tail = DEACTIVATE_TO_TAIL;
Christoph Lameter894b8782007-05-10 03:15:16 -07001733 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001734
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001735 /*
1736 * Stage one: Free all available per cpu objects back
1737 * to the page freelist while it is still frozen. Leave the
1738 * last one.
1739 *
1740	 * There is no need to take the list_lock because the page
1741 * is still frozen.
1742 */
1743 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1744 void *prior;
1745 unsigned long counters;
1746
1747 do {
1748 prior = page->freelist;
1749 counters = page->counters;
1750 set_freepointer(s, freelist, prior);
1751 new.counters = counters;
1752 new.inuse--;
1753 VM_BUG_ON(!new.frozen);
1754
Christoph Lameter1d071712011-07-14 12:49:12 -05001755 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001756 prior, counters,
1757 freelist, new.counters,
1758 "drain percpu freelist"));
1759
1760 freelist = nextfree;
1761 }
1762
1763 /*
1764 * Stage two: Ensure that the page is unfrozen while the
1765 * list presence reflects the actual number of objects
1766 * during unfreeze.
1767 *
1768	 * We set up the list membership and then perform a cmpxchg
1769 * with the count. If there is a mismatch then the page
1770 * is not unfrozen but the page is on the wrong list.
1771 *
1772 * Then we restart the process which may have to remove
1773 * the page from the list that we just put it on again
1774 * because the number of objects in the slab may have
1775 * changed.
1776 */
1777redo:
1778
1779 old.freelist = page->freelist;
1780 old.counters = page->counters;
1781 VM_BUG_ON(!old.frozen);
1782
1783 /* Determine target state of the slab */
1784 new.counters = old.counters;
1785 if (freelist) {
1786 new.inuse--;
1787 set_freepointer(s, freelist, old.freelist);
1788 new.freelist = freelist;
1789 } else
1790 new.freelist = old.freelist;
1791
1792 new.frozen = 0;
1793
Christoph Lameter81107182011-08-09 13:01:32 -05001794 if (!new.inuse && n->nr_partial > s->min_partial)
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001795 m = M_FREE;
1796 else if (new.freelist) {
1797 m = M_PARTIAL;
1798 if (!lock) {
1799 lock = 1;
1800 /*
1801			 * Taking the spinlock removes the possibility
1802 * that acquire_slab() will see a slab page that
1803 * is frozen
1804 */
1805 spin_lock(&n->list_lock);
1806 }
1807 } else {
1808 m = M_FULL;
1809 if (kmem_cache_debug(s) && !lock) {
1810 lock = 1;
1811 /*
1812 * This also ensures that the scanning of full
1813 * slabs from diagnostic functions will not see
1814 * any frozen slabs.
1815 */
1816 spin_lock(&n->list_lock);
1817 }
1818 }
1819
1820 if (l != m) {
1821
1822 if (l == M_PARTIAL)
1823
1824 remove_partial(n, page);
1825
1826 else if (l == M_FULL)
1827
1828 remove_full(s, page);
1829
1830 if (m == M_PARTIAL) {
1831
1832 add_partial(n, page, tail);
Shaohua Li136333d2011-08-24 08:57:52 +08001833 stat(s, tail);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001834
1835 } else if (m == M_FULL) {
1836
1837 stat(s, DEACTIVATE_FULL);
1838 add_full(s, n, page);
1839
1840 }
1841 }
1842
1843 l = m;
Christoph Lameter1d071712011-07-14 12:49:12 -05001844 if (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001845 old.freelist, old.counters,
1846 new.freelist, new.counters,
1847 "unfreezing slab"))
1848 goto redo;
1849
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001850 if (lock)
1851 spin_unlock(&n->list_lock);
1852
1853 if (m == M_FREE) {
1854 stat(s, DEACTIVATE_EMPTY);
1855 discard_slab(s, page);
1856 stat(s, FREE_SLAB);
1857 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001858}
1859
Joonsoo Kimd24ac772012-05-18 22:01:17 +09001860/*
1861 * Unfreeze all the cpu partial slabs.
1862 *
1863 * This function must be called with interrupt disabled.
1864 */
Christoph Lameter49e22582011-08-09 16:12:27 -05001865static void unfreeze_partials(struct kmem_cache *s)
1866{
Joonsoo Kim43d77862012-06-09 02:23:16 +09001867 struct kmem_cache_node *n = NULL, *n2 = NULL;
Christoph Lameter49e22582011-08-09 16:12:27 -05001868 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
Shaohua Li9ada1932011-11-14 13:34:13 +08001869 struct page *page, *discard_page = NULL;
Christoph Lameter49e22582011-08-09 16:12:27 -05001870
1871 while ((page = c->partial)) {
Christoph Lameter49e22582011-08-09 16:12:27 -05001872 struct page new;
1873 struct page old;
1874
1875 c->partial = page->next;
Joonsoo Kim43d77862012-06-09 02:23:16 +09001876
1877 n2 = get_node(s, page_to_nid(page));
1878 if (n != n2) {
1879 if (n)
1880 spin_unlock(&n->list_lock);
1881
1882 n = n2;
1883 spin_lock(&n->list_lock);
1884 }
Christoph Lameter49e22582011-08-09 16:12:27 -05001885
1886 do {
1887
1888 old.freelist = page->freelist;
1889 old.counters = page->counters;
1890 VM_BUG_ON(!old.frozen);
1891
1892 new.counters = old.counters;
1893 new.freelist = old.freelist;
1894
1895 new.frozen = 0;
1896
Joonsoo Kimd24ac772012-05-18 22:01:17 +09001897 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter49e22582011-08-09 16:12:27 -05001898 old.freelist, old.counters,
1899 new.freelist, new.counters,
1900 "unfreezing slab"));
1901
Joonsoo Kim43d77862012-06-09 02:23:16 +09001902 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
Shaohua Li9ada1932011-11-14 13:34:13 +08001903 page->next = discard_page;
1904 discard_page = page;
Joonsoo Kim43d77862012-06-09 02:23:16 +09001905 } else {
1906 add_partial(n, page, DEACTIVATE_TO_TAIL);
1907 stat(s, FREE_ADD_PARTIAL);
Christoph Lameter49e22582011-08-09 16:12:27 -05001908 }
1909 }
1910
1911 if (n)
1912 spin_unlock(&n->list_lock);
Shaohua Li9ada1932011-11-14 13:34:13 +08001913
1914 while (discard_page) {
1915 page = discard_page;
1916 discard_page = discard_page->next;
1917
1918 stat(s, DEACTIVATE_EMPTY);
1919 discard_slab(s, page);
1920 stat(s, FREE_SLAB);
1921 }
Christoph Lameter49e22582011-08-09 16:12:27 -05001922}
1923
1924/*
1925 * Put a page that was just frozen (in __slab_free) into a partial page
1926 * slot if available. This is done with neither interrupts nor preemption
1927 * disabled. The cmpxchg is racy and may put the partial page
1928 * onto a random cpu's partial slot.
1929 *
1930 * If we did not find a slot then simply move all the partials to the
1931 * per node partial list.
1932 */
1933int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1934{
1935 struct page *oldpage;
1936 int pages;
1937 int pobjects;
1938
1939 do {
1940 pages = 0;
1941 pobjects = 0;
1942 oldpage = this_cpu_read(s->cpu_slab->partial);
1943
1944 if (oldpage) {
1945 pobjects = oldpage->pobjects;
1946 pages = oldpage->pages;
1947 if (drain && pobjects > s->cpu_partial) {
1948 unsigned long flags;
1949 /*
1950 * partial array is full. Move the existing
1951 * set to the per node partial list.
1952 */
1953 local_irq_save(flags);
1954 unfreeze_partials(s);
1955 local_irq_restore(flags);
1956 pobjects = 0;
1957 pages = 0;
Alex Shi8028dce2012-02-03 23:34:56 +08001958 stat(s, CPU_PARTIAL_DRAIN);
Christoph Lameter49e22582011-08-09 16:12:27 -05001959 }
1960 }
1961
1962 pages++;
1963 pobjects += page->objects - page->inuse;
1964
1965 page->pages = pages;
1966 page->pobjects = pobjects;
1967 page->next = oldpage;
1968
Christoph Lameter933393f2011-12-22 11:58:51 -06001969 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
Christoph Lameter49e22582011-08-09 16:12:27 -05001970 return pobjects;
1971}
1972
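/*
 * Deactivate the current cpu slab of this kmem_cache_cpu and reset its
 * freelist, page pointer and transaction id.
 */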
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001973static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001974{
Christoph Lameter84e554e62009-12-18 16:26:23 -06001975 stat(s, CPUSLAB_FLUSH);
Christoph Lameterc17dda42012-05-09 10:09:57 -05001976 deactivate_slab(s, c->page, c->freelist);
1977
1978 c->tid = next_tid(c->tid);
1979 c->page = NULL;
1980 c->freelist = NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001981}
1982
1983/*
1984 * Flush cpu slab.
Christoph Lameter6446faa2008-02-15 23:45:26 -08001985 *
Christoph Lameter81819f02007-05-06 14:49:36 -07001986 * Called from IPI handler with interrupts disabled.
1987 */
Christoph Lameter0c710012007-07-17 04:03:24 -07001988static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
Christoph Lameter81819f02007-05-06 14:49:36 -07001989{
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001990 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameter81819f02007-05-06 14:49:36 -07001991
Christoph Lameter49e22582011-08-09 16:12:27 -05001992 if (likely(c)) {
1993 if (c->page)
1994 flush_slab(s, c);
1995
1996 unfreeze_partials(s);
1997 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001998}
1999
2000static void flush_cpu_slab(void *d)
2001{
2002 struct kmem_cache *s = d;
Christoph Lameter81819f02007-05-06 14:49:36 -07002003
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002004 __flush_cpu_slab(s, smp_processor_id());
Christoph Lameter81819f02007-05-06 14:49:36 -07002005}
2006
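/*
 * Used by flush_all() to skip cpus that hold neither a cpu slab nor any
 * per cpu partial pages.
 */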
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002007static bool has_cpu_slab(int cpu, void *info)
2008{
2009 struct kmem_cache *s = info;
2010 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2011
majianpeng02e1a9c2012-05-17 17:03:26 -07002012 return c->page || c->partial;
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002013}
2014
Christoph Lameter81819f02007-05-06 14:49:36 -07002015static void flush_all(struct kmem_cache *s)
2016{
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002017 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
Christoph Lameter81819f02007-05-06 14:49:36 -07002018}
2019
2020/*
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002021 * Check if the objects in a per cpu structure fit numa
2022 * locality expectations.
2023 */
Christoph Lameter57d437d2012-05-09 10:09:59 -05002024static inline int node_match(struct page *page, int node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002025{
2026#ifdef CONFIG_NUMA
Christoph Lameter57d437d2012-05-09 10:09:59 -05002027 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002028 return 0;
2029#endif
2030 return 1;
2031}
2032
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002033static int count_free(struct page *page)
2034{
2035 return page->objects - page->inuse;
2036}
2037
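/*
 * Sum a per page quantity (such as the number of free objects) over all
 * slabs on a node's partial list.
 */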
2038static unsigned long count_partial(struct kmem_cache_node *n,
2039 int (*get_count)(struct page *))
2040{
2041 unsigned long flags;
2042 unsigned long x = 0;
2043 struct page *page;
2044
2045 spin_lock_irqsave(&n->list_lock, flags);
2046 list_for_each_entry(page, &n->partial, lru)
2047 x += get_count(page);
2048 spin_unlock_irqrestore(&n->list_lock, flags);
2049 return x;
2050}
2051
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04002052static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2053{
2054#ifdef CONFIG_SLUB_DEBUG
2055 return atomic_long_read(&n->total_objects);
2056#else
2057 return 0;
2058#endif
2059}
2060
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002061static noinline void
2062slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2063{
2064 int node;
2065
2066 printk(KERN_WARNING
2067 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2068 nid, gfpflags);
2069 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002070 "default order: %d, min order: %d\n", s->name, s->object_size,
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002071 s->size, oo_order(s->oo), oo_order(s->min));
2072
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002073 if (oo_order(s->min) > get_order(s->object_size))
David Rientjesfa5ec8a2009-07-07 00:14:14 -07002074 printk(KERN_WARNING " %s debugging increased min order, use "
2075 "slub_debug=O to disable.\n", s->name);
2076
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002077 for_each_online_node(node) {
2078 struct kmem_cache_node *n = get_node(s, node);
2079 unsigned long nr_slabs;
2080 unsigned long nr_objs;
2081 unsigned long nr_free;
2082
2083 if (!n)
2084 continue;
2085
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04002086 nr_free = count_partial(n, count_free);
2087 nr_slabs = node_nr_slabs(n);
2088 nr_objs = node_nr_objs(n);
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002089
2090 printk(KERN_WARNING
2091 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
2092 node, nr_slabs, nr_objs, nr_free);
2093 }
2094}
2095
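/*
 * Refill path for the slow allocation: try to take objects from a partial
 * slab first and only allocate a brand new slab if none are available.
 * Returns the freelist to use or NULL if out of memory.
 */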
Christoph Lameter497b66f2011-08-09 16:12:26 -05002096static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2097 int node, struct kmem_cache_cpu **pc)
2098{
Christoph Lameter6faa6832012-05-09 10:09:51 -05002099 void *freelist;
Christoph Lameter188fd062012-05-09 10:09:55 -05002100 struct kmem_cache_cpu *c = *pc;
2101 struct page *page;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002102
Christoph Lameter188fd062012-05-09 10:09:55 -05002103 freelist = get_partial(s, flags, node, c);
2104
2105 if (freelist)
2106 return freelist;
2107
2108 page = new_slab(s, flags, node);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002109 if (page) {
2110 c = __this_cpu_ptr(s->cpu_slab);
2111 if (c->page)
2112 flush_slab(s, c);
2113
2114 /*
2115 * No other reference to the page yet so we can
2116 * muck around with it freely without cmpxchg
2117 */
Christoph Lameter6faa6832012-05-09 10:09:51 -05002118 freelist = page->freelist;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002119 page->freelist = NULL;
2120
2121 stat(s, ALLOC_SLAB);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002122 c->page = page;
2123 *pc = c;
2124 } else
Christoph Lameter6faa6832012-05-09 10:09:51 -05002125 freelist = NULL;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002126
Christoph Lameter6faa6832012-05-09 10:09:51 -05002127 return freelist;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002128}
2129
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002130/*
Christoph Lameter213eeb92011-11-11 14:07:14 -06002131 * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
2132 * or deactivate the page.
2133 *
2134 * The page is still frozen if the return value is not NULL.
2135 *
2136 * If this function returns NULL then the page has been unfrozen.
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002137 *
2138 * This function must be called with interrupt disabled.
Christoph Lameter213eeb92011-11-11 14:07:14 -06002139 */
2140static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2141{
2142 struct page new;
2143 unsigned long counters;
2144 void *freelist;
2145
2146 do {
2147 freelist = page->freelist;
2148 counters = page->counters;
Christoph Lameter6faa6832012-05-09 10:09:51 -05002149
Christoph Lameter213eeb92011-11-11 14:07:14 -06002150 new.counters = counters;
2151 VM_BUG_ON(!new.frozen);
2152
2153 new.inuse = page->objects;
2154 new.frozen = freelist != NULL;
2155
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002156 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter213eeb92011-11-11 14:07:14 -06002157 freelist, counters,
2158 NULL, new.counters,
2159 "get_freelist"));
2160
2161 return freelist;
2162}
2163
2164/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002165 * Slow path. The lockless freelist is empty or we need to perform
2166 * debugging duties.
Christoph Lameter81819f02007-05-06 14:49:36 -07002167 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002168 * Processing is still very fast if new objects have been freed to the
2169 * regular freelist. In that case we simply take over the regular freelist
2170 * as the lockless freelist and zap the regular freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07002171 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002172 * If that is not working then we fall back to the partial lists. We take the
2173 * first element of the freelist as the object to allocate now and move the
2174 * rest of the freelist to the lockless freelist.
2175 *
2176 * And if we were unable to get a new slab from the partial slab lists then
Christoph Lameter6446faa2008-02-15 23:45:26 -08002177 * we need to allocate a new slab. This is the slowest path since it involves
2178 * a call to the page allocator and the setup of a new slab.
Christoph Lameter81819f02007-05-06 14:49:36 -07002179 */
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002180static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2181 unsigned long addr, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07002182{
Christoph Lameter6faa6832012-05-09 10:09:51 -05002183 void *freelist;
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002184 struct page *page;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002185 unsigned long flags;
2186
2187 local_irq_save(flags);
2188#ifdef CONFIG_PREEMPT
2189 /*
2190 * We may have been preempted and rescheduled on a different
2191 * cpu before disabling interrupts. Need to reload cpu area
2192 * pointer.
2193 */
2194 c = this_cpu_ptr(s->cpu_slab);
2195#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002196
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002197 page = c->page;
2198 if (!page)
Christoph Lameter81819f02007-05-06 14:49:36 -07002199 goto new_slab;
Christoph Lameter49e22582011-08-09 16:12:27 -05002200redo:
Christoph Lameter6faa6832012-05-09 10:09:51 -05002201
Christoph Lameter57d437d2012-05-09 10:09:59 -05002202 if (unlikely(!node_match(page, node))) {
Christoph Lametere36a2652011-06-01 12:25:57 -05002203 stat(s, ALLOC_NODE_MISMATCH);
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002204 deactivate_slab(s, page, c->freelist);
Christoph Lameterc17dda42012-05-09 10:09:57 -05002205 c->page = NULL;
2206 c->freelist = NULL;
Christoph Lameterfc59c052011-06-01 12:25:56 -05002207 goto new_slab;
2208 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08002209
Eric Dumazet73736e02011-12-13 04:57:06 +01002210 /* must check again c->freelist in case of cpu migration or IRQ */
Christoph Lameter6faa6832012-05-09 10:09:51 -05002211 freelist = c->freelist;
2212 if (freelist)
Eric Dumazet73736e02011-12-13 04:57:06 +01002213 goto load_freelist;
2214
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002215 stat(s, ALLOC_SLOWPATH);
2216
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002217 freelist = get_freelist(s, page);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002218
Christoph Lameter6faa6832012-05-09 10:09:51 -05002219 if (!freelist) {
Christoph Lameter03e404a2011-06-01 12:25:58 -05002220 c->page = NULL;
2221 stat(s, DEACTIVATE_BYPASS);
Christoph Lameterfc59c052011-06-01 12:25:56 -05002222 goto new_slab;
Christoph Lameter03e404a2011-06-01 12:25:58 -05002223 }
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002224
Christoph Lameter81819f02007-05-06 14:49:36 -07002225 stat(s, ALLOC_REFILL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08002226
Christoph Lameter894b8782007-05-10 03:15:16 -07002227load_freelist:
Christoph Lameter507effe2012-05-09 10:09:52 -05002228 /*
2229 * freelist is pointing to the list of objects to be used.
2230 * page is pointing to the page from which the objects are obtained.
2231 * That page must be frozen for per cpu allocations to work.
2232 */
2233 VM_BUG_ON(!c->page->frozen);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002234 c->freelist = get_freepointer(s, freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002235 c->tid = next_tid(c->tid);
2236 local_irq_restore(flags);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002237 return freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07002238
Christoph Lameter81819f02007-05-06 14:49:36 -07002239new_slab:
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002240
Christoph Lameter49e22582011-08-09 16:12:27 -05002241 if (c->partial) {
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002242 page = c->page = c->partial;
2243 c->partial = page->next;
Christoph Lameter49e22582011-08-09 16:12:27 -05002244 stat(s, CPU_PARTIAL_ALLOC);
2245 c->freelist = NULL;
2246 goto redo;
Christoph Lameter81819f02007-05-06 14:49:36 -07002247 }
2248
Christoph Lameter188fd062012-05-09 10:09:55 -05002249 freelist = new_slab_objects(s, gfpflags, node, &c);
Christoph Lameterb811c202007-10-16 23:25:51 -07002250
Christoph Lameterf46974362012-05-09 10:09:54 -05002251 if (unlikely(!freelist)) {
2252 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2253 slab_out_of_memory(s, gfpflags, node);
Christoph Lameter01ad8a72011-04-15 14:48:14 -05002254
Christoph Lameterf46974362012-05-09 10:09:54 -05002255 local_irq_restore(flags);
2256 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07002257 }
Christoph Lameter894b8782007-05-10 03:15:16 -07002258
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002259 page = c->page;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002260 if (likely(!kmem_cache_debug(s)))
Christoph Lameter81819f02007-05-06 14:49:36 -07002261 goto load_freelist;
Christoph Lameter894b8782007-05-10 03:15:16 -07002262
Christoph Lameter497b66f2011-08-09 16:12:26 -05002263 /* Only entered in the debug case */
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002264 if (!alloc_debug_processing(s, page, freelist, addr))
Christoph Lameter497b66f2011-08-09 16:12:26 -05002265 goto new_slab; /* Slab failed checks. Next slab needed */
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002266
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002267 deactivate_slab(s, page, get_freepointer(s, freelist));
Christoph Lameterc17dda42012-05-09 10:09:57 -05002268 c->page = NULL;
2269 c->freelist = NULL;
Christoph Lametera71ae472011-05-25 09:47:43 -05002270 local_irq_restore(flags);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002271 return freelist;
Christoph Lameter894b8782007-05-10 03:15:16 -07002272}
2273
2274/*
2275 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2276 * have the fastpath folded into their functions. So no function call
2277 * overhead for requests that can be satisfied on the fastpath.
2278 *
2279 * The fastpath works by first checking if the lockless freelist can be used.
2280 * If not then __slab_alloc is called for slow processing.
2281 *
2282 * Otherwise we can simply pick the next object from the lockless free list.
2283 */
Pekka Enberg06428782008-01-07 23:20:27 -08002284static __always_inline void *slab_alloc(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002285 gfp_t gfpflags, int node, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07002286{
Christoph Lameter894b8782007-05-10 03:15:16 -07002287 void **object;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002288 struct kmem_cache_cpu *c;
Christoph Lameter57d437d2012-05-09 10:09:59 -05002289 struct page *page;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002290 unsigned long tid;
Christoph Lameter1f842602008-01-07 23:20:30 -08002291
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002292 if (slab_pre_alloc_hook(s, gfpflags))
Akinobu Mita773ff602008-12-23 19:37:01 +09002293 return NULL;
2294
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002295redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002296
2297 /*
2298 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2299 * enabled. We may switch back and forth between cpus while
2300 * reading from one cpu area. That does not matter as long
2301 * as we end up on the original cpu again when doing the cmpxchg.
2302 */
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002303 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002304
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002305 /*
2306 * The transaction ids are globally unique per cpu and per operation on
2307	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2308 * occurs on the right processor and that there was no operation on the
2309 * linked list in between.
2310 */
2311 tid = c->tid;
2312 barrier();
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002313
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002314 object = c->freelist;
Christoph Lameter57d437d2012-05-09 10:09:59 -05002315 page = c->page;
2316 if (unlikely(!object || !node_match(page, node)))
Christoph Lameter894b8782007-05-10 03:15:16 -07002317
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002318 object = __slab_alloc(s, gfpflags, node, addr, c);
Christoph Lameter894b8782007-05-10 03:15:16 -07002319
2320 else {
Eric Dumazet0ad95002011-12-16 16:25:34 +01002321 void *next_object = get_freepointer_safe(s, object);
2322
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002323 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002324 * The cmpxchg will only match if there was no additional
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002325 * operation and if we are on the right processor.
2326 *
2327 * The cmpxchg does the following atomically (without lock semantics!)
2328 * 1. Relocate first pointer to the current per cpu area.
2329 * 2. Verify that tid and freelist have not been changed
2330 * 3. If they were not changed replace tid and freelist
2331 *
2332 * Since this is without lock semantics the protection is only against
2333 * code executing on this cpu *not* from access by other cpus.
2334 */
Christoph Lameter933393f2011-12-22 11:58:51 -06002335 if (unlikely(!this_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002336 s->cpu_slab->freelist, s->cpu_slab->tid,
2337 object, tid,
Eric Dumazet0ad95002011-12-16 16:25:34 +01002338 next_object, next_tid(tid)))) {
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002339
2340 note_cmpxchg_failure("slab_alloc", s, tid);
2341 goto redo;
2342 }
Eric Dumazet0ad95002011-12-16 16:25:34 +01002343 prefetch_freepointer(s, next_object);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002344 stat(s, ALLOC_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07002345 }
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002346
Pekka Enberg74e21342009-11-25 20:14:48 +02002347 if (unlikely(gfpflags & __GFP_ZERO) && object)
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002348 memset(object, 0, s->object_size);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07002349
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002350 slab_post_alloc_hook(s, gfpflags, object);
Vegard Nossum5a896d92008-04-04 00:54:48 +02002351
Christoph Lameter894b8782007-05-10 03:15:16 -07002352 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07002353}
2354
2355void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2356{
Christoph Lameter2154a332010-07-09 14:07:10 -05002357 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002358
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002359 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002360
2361 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002362}
2363EXPORT_SYMBOL(kmem_cache_alloc);
2364
Li Zefan0f24f122009-12-11 15:45:30 +08002365#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01002366void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002367{
Richard Kennedy4a923792010-10-21 10:29:19 +01002368 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2369 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2370 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002371}
Richard Kennedy4a923792010-10-21 10:29:19 +01002372EXPORT_SYMBOL(kmem_cache_alloc_trace);
2373
2374void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2375{
2376 void *ret = kmalloc_order(size, flags, order);
2377 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2378 return ret;
2379}
2380EXPORT_SYMBOL(kmalloc_order_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002381#endif
2382
Christoph Lameter81819f02007-05-06 14:49:36 -07002383#ifdef CONFIG_NUMA
2384void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2385{
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002386 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2387
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002388 trace_kmem_cache_alloc_node(_RET_IP_, ret,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002389 s->object_size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002390
2391 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002392}
2393EXPORT_SYMBOL(kmem_cache_alloc_node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002394
Li Zefan0f24f122009-12-11 15:45:30 +08002395#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01002396void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002397 gfp_t gfpflags,
Richard Kennedy4a923792010-10-21 10:29:19 +01002398 int node, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002399{
Richard Kennedy4a923792010-10-21 10:29:19 +01002400 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2401
2402 trace_kmalloc_node(_RET_IP_, ret,
2403 size, s->size, gfpflags, node);
2404 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002405}
Richard Kennedy4a923792010-10-21 10:29:19 +01002406EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002407#endif
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09002408#endif
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002409
Christoph Lameter81819f02007-05-06 14:49:36 -07002410/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002411 * Slow path handling. This may still be called frequently since objects
2412 * have a longer lifetime than the cpu slabs in most processing loads.
Christoph Lameter81819f02007-05-06 14:49:36 -07002413 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002414 * So we still attempt to reduce cache line usage. Just take the slab
2415 * lock and free the item. If there is no additional partial page
2416 * handling required then we can return immediately.
Christoph Lameter81819f02007-05-06 14:49:36 -07002417 */
Christoph Lameter894b8782007-05-10 03:15:16 -07002418static void __slab_free(struct kmem_cache *s, struct page *page,
Christoph Lameterff120592009-12-18 16:26:22 -06002419 void *x, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07002420{
2421 void *prior;
2422 void **object = (void *)x;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002423 int was_frozen;
2424 int inuse;
2425 struct page new;
2426 unsigned long counters;
2427 struct kmem_cache_node *n = NULL;
Christoph Lameter61728d12011-06-01 12:25:51 -05002428 unsigned long uninitialized_var(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002429
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002430 stat(s, FREE_SLOWPATH);
Christoph Lameter81819f02007-05-06 14:49:36 -07002431
Christoph Lameter8dc16c62011-04-15 14:48:16 -05002432 if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
Christoph Lameter80f08c12011-06-01 12:25:55 -05002433 return;
Christoph Lameter6446faa2008-02-15 23:45:26 -08002434
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002435 do {
2436 prior = page->freelist;
2437 counters = page->counters;
2438 set_freepointer(s, object, prior);
2439 new.counters = counters;
2440 was_frozen = new.frozen;
2441 new.inuse--;
2442 if ((!new.inuse || !prior) && !was_frozen && !n) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002443
2444 if (!kmem_cache_debug(s) && !prior)
2445
2446 /*
2447			 * Slab was on no list before and will be partially empty.
2448 * We can defer the list move and instead freeze it.
2449 */
2450 new.frozen = 1;
2451
2452 else { /* Needs to be taken off a list */
2453
2454 n = get_node(s, page_to_nid(page));
2455 /*
2456 * Speculatively acquire the list_lock.
2457 * If the cmpxchg does not succeed then we may
2458 * drop the list_lock without any processing.
2459 *
2460 * Otherwise the list_lock will synchronize with
2461 * other processors updating the list of slabs.
2462 */
2463 spin_lock_irqsave(&n->list_lock, flags);
2464
2465 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002466 }
2467 inuse = new.inuse;
Christoph Lameter81819f02007-05-06 14:49:36 -07002468
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002469 } while (!cmpxchg_double_slab(s, page,
2470 prior, counters,
2471 object, new.counters,
2472 "__slab_free"));
Christoph Lameter81819f02007-05-06 14:49:36 -07002473
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002474 if (likely(!n)) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002475
2476 /*
2477 * If we just froze the page then put it onto the
2478 * per cpu partial list.
2479 */
Alex Shi8028dce2012-02-03 23:34:56 +08002480 if (new.frozen && !was_frozen) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002481 put_cpu_partial(s, page, 1);
Alex Shi8028dce2012-02-03 23:34:56 +08002482 stat(s, CPU_PARTIAL_FREE);
2483 }
Christoph Lameter49e22582011-08-09 16:12:27 -05002484 /*
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002485 * The list lock was not taken, therefore no list
2486 * activity is necessary.
2487 */
2488 if (was_frozen)
2489 stat(s, FREE_FROZEN);
Christoph Lameter80f08c12011-06-01 12:25:55 -05002490 return;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002491 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002492
2493 /*
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002494 * was_frozen may have been set after we acquired the list_lock in
2495 * an earlier loop. So we need to check it here again.
Christoph Lameter81819f02007-05-06 14:49:36 -07002496 */
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002497 if (was_frozen)
2498 stat(s, FREE_FROZEN);
2499 else {
2500 if (unlikely(!inuse && n->nr_partial > s->min_partial))
2501 goto slab_empty;
Christoph Lameter81819f02007-05-06 14:49:36 -07002502
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002503 /*
2504 * Objects left in the slab. If it was not on the partial list before
2505 * then add it.
2506 */
2507 if (unlikely(!prior)) {
2508 remove_full(s, page);
Shaohua Li136333d2011-08-24 08:57:52 +08002509 add_partial(n, page, DEACTIVATE_TO_TAIL);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002510 stat(s, FREE_ADD_PARTIAL);
2511 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002512 }
Christoph Lameter80f08c12011-06-01 12:25:55 -05002513 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002514 return;
2515
2516slab_empty:
Christoph Lametera973e9d2008-03-01 13:40:44 -08002517 if (prior) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002518 /*
Christoph Lameter6fbabb22011-08-08 11:16:56 -05002519 * Slab on the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07002520 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05002521 remove_partial(n, page);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002522 stat(s, FREE_REMOVE_PARTIAL);
Christoph Lameter6fbabb22011-08-08 11:16:56 -05002523 } else
2524 /* Slab must be on the full list */
2525 remove_full(s, page);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002526
Christoph Lameter80f08c12011-06-01 12:25:55 -05002527 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002528 stat(s, FREE_SLAB);
Christoph Lameter81819f02007-05-06 14:49:36 -07002529 discard_slab(s, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07002530}
2531
Christoph Lameter894b8782007-05-10 03:15:16 -07002532/*
2533 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2534 * can perform fastpath freeing without additional function calls.
2535 *
2536 * The fastpath is only possible if we are freeing to the current cpu slab
2537 * of this processor. This is typically the case if we have just allocated
2538 * the item before.
2539 *
2540 * If fastpath is not possible then fall back to __slab_free where we deal
2541 * with all sorts of special processing.
2542 */
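/*
 * A note on the lockless fastpath below: the transaction id (tid) is read
 * together with the cpu slab, a barrier orders the reads, and the final
 * this_cpu_cmpxchg_double() only succeeds if both freelist and tid are
 * still unchanged. Any interleaving alloc/free or migration to another cpu
 * changes the tid, so the cmpxchg fails and we retry from "redo".
 */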
Pekka Enberg06428782008-01-07 23:20:27 -08002543static __always_inline void slab_free(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002544 struct page *page, void *x, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07002545{
2546 void **object = (void *)x;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002547 struct kmem_cache_cpu *c;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002548 unsigned long tid;
Christoph Lameter1f842602008-01-07 23:20:30 -08002549
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002550 slab_free_hook(s, x);
2551
Christoph Lametera24c5a02011-03-15 12:45:21 -05002552redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002553 /*
2554 * Determine the current cpu's per cpu slab.
2555 * The cpu may change afterward. However, that does not matter since
2556 * data is retrieved via this pointer. If we are on the same cpu
2557 * during the cmpxchg then the free will succeed.
2558 */
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002559 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002560
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002561 tid = c->tid;
2562 barrier();
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002563
Christoph Lameter442b06b2011-05-17 16:29:31 -05002564 if (likely(page == c->page)) {
Christoph Lameterff120592009-12-18 16:26:22 -06002565 set_freepointer(s, object, c->freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002566
Christoph Lameter933393f2011-12-22 11:58:51 -06002567 if (unlikely(!this_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002568 s->cpu_slab->freelist, s->cpu_slab->tid,
2569 c->freelist, tid,
2570 object, next_tid(tid)))) {
2571
2572 note_cmpxchg_failure("slab_free", s, tid);
2573 goto redo;
2574 }
Christoph Lameter84e554e62009-12-18 16:26:23 -06002575 stat(s, FREE_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07002576 } else
Christoph Lameterff120592009-12-18 16:26:22 -06002577 __slab_free(s, page, x, addr);
Christoph Lameter894b8782007-05-10 03:15:16 -07002578
Christoph Lameter894b8782007-05-10 03:15:16 -07002579}
2580
Christoph Lameter81819f02007-05-06 14:49:36 -07002581void kmem_cache_free(struct kmem_cache *s, void *x)
2582{
Christoph Lameter77c5e2d2007-05-06 14:49:42 -07002583 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07002584
Christoph Lameterb49af682007-05-06 14:49:41 -07002585 page = virt_to_head_page(x);
Christoph Lameter81819f02007-05-06 14:49:36 -07002586
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002587 slab_free(s, page, x, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002588
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002589 trace_kmem_cache_free(_RET_IP_, x);
Christoph Lameter81819f02007-05-06 14:49:36 -07002590}
2591EXPORT_SYMBOL(kmem_cache_free);
2592
Christoph Lameter81819f02007-05-06 14:49:36 -07002593/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002594 * Object placement in a slab is made very easy because we always start at
2595 * offset 0. If we tune the size of the object to the alignment then we can
2596 * get the required alignment by putting one properly sized object after
2597 * another.
Christoph Lameter81819f02007-05-06 14:49:36 -07002598 *
2599 * Notice that the allocation order determines the sizes of the per cpu
2600 * caches. Each processor always has one slab available for allocations.
2601 * Increasing the allocation order reduces the number of times that slabs
Christoph Lameter672bba32007-05-09 02:32:39 -07002602 * must be moved on and off the partial lists and is therefore a factor in
Christoph Lameter81819f02007-05-06 14:49:36 -07002603 * locking overhead.
Christoph Lameter81819f02007-05-06 14:49:36 -07002604 */
2605
2606/*
2607 * Minimum / Maximum order of slab pages. This influences locking overhead
2608 * and slab fragmentation. A higher order reduces the number of partial slabs
2609 * and increases the number of allocations possible without having to
2610 * take the list_lock.
2611 */
2612static int slub_min_order;
Christoph Lameter114e9e82008-04-14 19:11:41 +03002613static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03002614static int slub_min_objects;
Christoph Lameter81819f02007-05-06 14:49:36 -07002615
2616/*
2617 * Merge control. If this is set then no merging of slab caches will occur.
Christoph Lameter672bba32007-05-09 02:32:39 -07002618 * (Could be removed. This was introduced to pacify the merge skeptics.)
Christoph Lameter81819f02007-05-06 14:49:36 -07002619 */
2620static int slub_nomerge;
2621
2622/*
Christoph Lameter81819f02007-05-06 14:49:36 -07002623 * Calculate the order of allocation given a slab object size.
2624 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002625 * The order of allocation has significant impact on performance and other
2626 * system components. Generally order 0 allocations should be preferred since
2627 * order 0 does not cause fragmentation in the page allocator. Larger objects
2628 * can be problematic to put into order 0 slabs because there may be too much
Christoph Lameterc124f5b2008-04-14 19:13:29 +03002629 * unused space left. We go to a higher order if more than 1/16th of the slab
Christoph Lameter672bba32007-05-09 02:32:39 -07002630 * would be wasted.
Christoph Lameter81819f02007-05-06 14:49:36 -07002631 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002632 * In order to reach satisfactory performance we must ensure that a minimum
2633 * number of objects is in one slab. Otherwise we may generate too much
2634 * activity on the partial lists which requires taking the list_lock. This is
2635 * less a concern for large slabs though which are rarely used.
Christoph Lameter81819f02007-05-06 14:49:36 -07002636 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002637 * slub_max_order specifies the order where we stop considering the
2638 * number of objects in a slab as critical. If we reach slub_max_order then
2639 * we try to keep the page order as low as possible. So we accept more waste
2640 * of space in favor of a small page order.
2641 *
2642 * Higher order allocations also allow the placement of more objects in a
2643 * slab and thereby reduce object handling overhead. If the user has
2644 * requested a higher minimum order then we start with that one instead of
2645 * the smallest order which will fit the object.
Christoph Lameter81819f02007-05-06 14:49:36 -07002646 */
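/*
 * Worked example (hypothetical numbers, 4K pages): for a 700 byte object
 * with min_objects = 16, reserved = 0 and fract_leftover = 16, the loop
 * below starts at order 2. A 16K slab then holds 23 objects with 284
 * bytes left over, which is below 16384 / 16 = 1024, so order 2 is
 * returned.
 */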
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002647static inline int slab_order(int size, int min_objects,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002648 int max_order, int fract_leftover, int reserved)
Christoph Lameter81819f02007-05-06 14:49:36 -07002649{
2650 int order;
2651 int rem;
Christoph Lameter6300ea72007-07-17 04:03:20 -07002652 int min_order = slub_min_order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002653
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002654 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +04002655 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
Christoph Lameter39b26462008-04-14 19:11:30 +03002656
Christoph Lameter6300ea72007-07-17 04:03:20 -07002657 for (order = max(min_order,
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002658 fls(min_objects * size - 1) - PAGE_SHIFT);
2659 order <= max_order; order++) {
2660
Christoph Lameter81819f02007-05-06 14:49:36 -07002661 unsigned long slab_size = PAGE_SIZE << order;
2662
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002663 if (slab_size < min_objects * size + reserved)
Christoph Lameter81819f02007-05-06 14:49:36 -07002664 continue;
2665
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002666 rem = (slab_size - reserved) % size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002667
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002668 if (rem <= slab_size / fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07002669 break;
2670
2671 }
Christoph Lameter672bba32007-05-09 02:32:39 -07002672
Christoph Lameter81819f02007-05-06 14:49:36 -07002673 return order;
2674}
2675
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002676static inline int calculate_order(int size, int reserved)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002677{
2678 int order;
2679 int min_objects;
2680 int fraction;
Zhang Yanmine8120ff2009-02-12 18:00:17 +02002681 int max_objects;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002682
2683 /*
2684 * Attempt to find the best configuration for a slab. This
2685 * works by first attempting to generate a layout with
2686 * the best configuration and backing off gradually.
2687 *
2688 * First we reduce the acceptable waste in a slab. Then
2689 * we reduce the minimum objects required in a slab.
2690 */
2691 min_objects = slub_min_objects;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03002692 if (!min_objects)
2693 min_objects = 4 * (fls(nr_cpu_ids) + 1);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002694 max_objects = order_objects(slub_max_order, size, reserved);
Zhang Yanmine8120ff2009-02-12 18:00:17 +02002695 min_objects = min(min_objects, max_objects);
2696
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002697 while (min_objects > 1) {
Christoph Lameterc124f5b2008-04-14 19:13:29 +03002698 fraction = 16;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002699 while (fraction >= 4) {
2700 order = slab_order(size, min_objects,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002701 slub_max_order, fraction, reserved);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002702 if (order <= slub_max_order)
2703 return order;
2704 fraction /= 2;
2705 }
Amerigo Wang5086c389c2009-08-19 21:44:13 +03002706 min_objects--;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002707 }
2708
2709 /*
2710 * We were unable to place multiple objects in a slab. Now
2711 * let's see if we can place a single object there.
2712 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002713 order = slab_order(size, 1, slub_max_order, 1, reserved);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002714 if (order <= slub_max_order)
2715 return order;
2716
2717 /*
2718 * Doh this slab cannot be placed using slub_max_order.
2719 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002720 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
David Rientjes818cf592009-04-23 09:58:22 +03002721 if (order < MAX_ORDER)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002722 return order;
2723 return -ENOSYS;
2724}
2725
Christoph Lameter81819f02007-05-06 14:49:36 -07002726/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002727 * Figure out what the alignment of the objects will be.
Christoph Lameter81819f02007-05-06 14:49:36 -07002728 */
2729static unsigned long calculate_alignment(unsigned long flags,
2730 unsigned long align, unsigned long size)
2731{
2732 /*
Christoph Lameter6446faa2008-02-15 23:45:26 -08002733 * If the user wants hardware cache aligned objects then follow that
2734 * suggestion if the object is sufficiently large.
Christoph Lameter81819f02007-05-06 14:49:36 -07002735 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08002736 * The hardware cache alignment cannot override the specified
2737 * alignment though. If that is greater, then use it.
Christoph Lameter81819f02007-05-06 14:49:36 -07002738 */
Nick Pigginb6210382008-03-05 14:05:56 -08002739 if (flags & SLAB_HWCACHE_ALIGN) {
2740 unsigned long ralign = cache_line_size();
2741 while (size <= ralign / 2)
2742 ralign /= 2;
2743 align = max(align, ralign);
2744 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002745
2746 if (align < ARCH_SLAB_MINALIGN)
Nick Pigginb6210382008-03-05 14:05:56 -08002747 align = ARCH_SLAB_MINALIGN;
Christoph Lameter81819f02007-05-06 14:49:36 -07002748
2749 return ALIGN(align, sizeof(void *));
2750}
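/*
 * Example (hypothetical values): with SLAB_HWCACHE_ALIGN, a 24 byte object
 * and a 64 byte cache line, the loop above halves ralign while
 * size <= ralign / 2, leaving ralign = 32, so the object is aligned to 32
 * bytes instead of a full cache line.
 */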
2751
Pekka Enberg5595cff2008-08-05 09:28:47 +03002752static void
Joonsoo Kim40534972012-05-11 00:50:47 +09002753init_kmem_cache_node(struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07002754{
2755 n->nr_partial = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07002756 spin_lock_init(&n->list_lock);
2757 INIT_LIST_HEAD(&n->partial);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002758#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter0f389ec2008-04-14 18:53:02 +03002759 atomic_long_set(&n->nr_slabs, 0);
Salman Qazi02b71b72008-09-11 12:25:41 -07002760 atomic_long_set(&n->total_objects, 0);
Christoph Lameter643b1132007-05-06 14:49:42 -07002761 INIT_LIST_HEAD(&n->full);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002762#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002763}
2764
Christoph Lameter55136592010-08-20 12:37:13 -05002765static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002766{
Christoph Lameter6c182dc2010-08-20 12:37:14 -05002767 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2768 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002769
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002770 /*
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04002771 * Must align to double word boundary for the double cmpxchg
2772 * instructions to work; see __pcpu_double_call_return_bool().
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002773 */
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04002774 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2775 2 * sizeof(void *));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002776
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002777 if (!s->cpu_slab)
2778 return 0;
2779
2780 init_kmem_cache_cpus(s);
2781
2782 return 1;
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002783}
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002784
Christoph Lameter51df1142010-08-20 12:37:15 -05002785static struct kmem_cache *kmem_cache_node;
2786
Christoph Lameter81819f02007-05-06 14:49:36 -07002787/*
2788 * No kmalloc_node yet so do it by hand. We know that this is the first
2789 * slab on the node for this slabcache. There are no concurrent accesses
2790 * possible.
2791 *
2792 * Note that this function only works on the kmalloc_node_cache
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002793 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2794 * memory on a fresh node that has no slab structures yet.
Christoph Lameter81819f02007-05-06 14:49:36 -07002795 */
Christoph Lameter55136592010-08-20 12:37:13 -05002796static void early_kmem_cache_node_alloc(int node)
Christoph Lameter81819f02007-05-06 14:49:36 -07002797{
2798 struct page *page;
2799 struct kmem_cache_node *n;
2800
Christoph Lameter51df1142010-08-20 12:37:15 -05002801 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
Christoph Lameter81819f02007-05-06 14:49:36 -07002802
Christoph Lameter51df1142010-08-20 12:37:15 -05002803 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002804
2805 BUG_ON(!page);
Christoph Lametera2f92ee2007-08-22 14:01:57 -07002806 if (page_to_nid(page) != node) {
2807 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2808 "node %d\n", node);
2809 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2810 "in order to be able to continue\n");
2811 }
2812
Christoph Lameter81819f02007-05-06 14:49:36 -07002813 n = page->freelist;
2814 BUG_ON(!n);
Christoph Lameter51df1142010-08-20 12:37:15 -05002815 page->freelist = get_freepointer(kmem_cache_node, n);
Christoph Lametere6e82ea2011-08-09 16:12:24 -05002816 page->inuse = 1;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05002817 page->frozen = 0;
Christoph Lameter51df1142010-08-20 12:37:15 -05002818 kmem_cache_node->node[node] = n;
Christoph Lameter8ab13722007-07-17 04:03:32 -07002819#ifdef CONFIG_SLUB_DEBUG
Christoph Lameterf7cb1932010-09-29 07:15:01 -05002820 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
Christoph Lameter51df1142010-08-20 12:37:15 -05002821 init_tracking(kmem_cache_node, n);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002822#endif
Joonsoo Kim40534972012-05-11 00:50:47 +09002823 init_kmem_cache_node(n);
Christoph Lameter51df1142010-08-20 12:37:15 -05002824 inc_slabs_node(kmem_cache_node, node, page->objects);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002825
Shaohua Li136333d2011-08-24 08:57:52 +08002826 add_partial(n, page, DEACTIVATE_TO_HEAD);
Christoph Lameter81819f02007-05-06 14:49:36 -07002827}
2828
2829static void free_kmem_cache_nodes(struct kmem_cache *s)
2830{
2831 int node;
2832
Christoph Lameterf64dc582007-10-16 01:25:33 -07002833 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002834 struct kmem_cache_node *n = s->node[node];
Christoph Lameter51df1142010-08-20 12:37:15 -05002835
Alexander Duyck73367bd2010-05-21 14:41:35 -07002836 if (n)
Christoph Lameter51df1142010-08-20 12:37:15 -05002837 kmem_cache_free(kmem_cache_node, n);
2838
Christoph Lameter81819f02007-05-06 14:49:36 -07002839 s->node[node] = NULL;
2840 }
2841}
2842
Christoph Lameter55136592010-08-20 12:37:13 -05002843static int init_kmem_cache_nodes(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002844{
2845 int node;
Christoph Lameter81819f02007-05-06 14:49:36 -07002846
Christoph Lameterf64dc582007-10-16 01:25:33 -07002847 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002848 struct kmem_cache_node *n;
2849
Alexander Duyck73367bd2010-05-21 14:41:35 -07002850 if (slab_state == DOWN) {
Christoph Lameter55136592010-08-20 12:37:13 -05002851 early_kmem_cache_node_alloc(node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07002852 continue;
Christoph Lameter81819f02007-05-06 14:49:36 -07002853 }
Christoph Lameter51df1142010-08-20 12:37:15 -05002854 n = kmem_cache_alloc_node(kmem_cache_node,
Christoph Lameter55136592010-08-20 12:37:13 -05002855 GFP_KERNEL, node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07002856
2857 if (!n) {
2858 free_kmem_cache_nodes(s);
2859 return 0;
2860 }
2861
Christoph Lameter81819f02007-05-06 14:49:36 -07002862 s->node[node] = n;
Joonsoo Kim40534972012-05-11 00:50:47 +09002863 init_kmem_cache_node(n);
Christoph Lameter81819f02007-05-06 14:49:36 -07002864 }
2865 return 1;
2866}
Christoph Lameter81819f02007-05-06 14:49:36 -07002867
David Rientjesc0bdb232009-02-25 09:16:35 +02002868static void set_min_partial(struct kmem_cache *s, unsigned long min)
David Rientjes3b89d7d2009-02-22 17:40:07 -08002869{
2870 if (min < MIN_PARTIAL)
2871 min = MIN_PARTIAL;
2872 else if (min > MAX_PARTIAL)
2873 min = MAX_PARTIAL;
2874 s->min_partial = min;
2875}
2876
Christoph Lameter81819f02007-05-06 14:49:36 -07002877/*
2878 * calculate_sizes() determines the order and the distribution of data within
2879 * a slab object.
2880 */
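/*
 * Roughly, the per object layout produced below is:
 *
 *	object_size bytes of payload (word aligned)
 *	an optional red zone word (SLAB_RED_ZONE)
 *	an optional out-of-line free pointer (RCU, poisoning or a ctor)
 *	two optional struct track records (SLAB_STORE_USER)
 *	an optional padding word to catch writes before the next object
 *
 * padded out to the final alignment, which yields s->size.
 */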
Christoph Lameter06b285d2008-04-14 19:11:41 +03002881static int calculate_sizes(struct kmem_cache *s, int forced_order)
Christoph Lameter81819f02007-05-06 14:49:36 -07002882{
2883 unsigned long flags = s->flags;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002884 unsigned long size = s->object_size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002885 unsigned long align = s->align;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002886 int order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002887
2888 /*
Christoph Lameterd8b42bf2008-02-15 23:45:25 -08002889 * Round up object size to the next word boundary. We can only
2890 * place the free pointer at word boundaries and this determines
2891 * the possible location of the free pointer.
2892 */
2893 size = ALIGN(size, sizeof(void *));
2894
2895#ifdef CONFIG_SLUB_DEBUG
2896 /*
Christoph Lameter81819f02007-05-06 14:49:36 -07002897 * Determine if we can poison the object itself. If the user of
2898 * the slab may touch the object after free or before allocation
2899 * then we should never poison the object itself.
2900 */
2901 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
Christoph Lameterc59def92007-05-16 22:10:50 -07002902 !s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07002903 s->flags |= __OBJECT_POISON;
2904 else
2905 s->flags &= ~__OBJECT_POISON;
2906
Christoph Lameter81819f02007-05-06 14:49:36 -07002907
2908 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002909 * If we are Redzoning then check if there is some space between the
Christoph Lameter81819f02007-05-06 14:49:36 -07002910 * end of the object and the free pointer. If not then add an
Christoph Lameter672bba32007-05-09 02:32:39 -07002911 * additional word to have some bytes to store Redzone information.
Christoph Lameter81819f02007-05-06 14:49:36 -07002912 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002913 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
Christoph Lameter81819f02007-05-06 14:49:36 -07002914 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002915#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002916
2917 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002918 * With that we have determined the number of bytes in actual use
2919 * by the object. This is the potential offset to the free pointer.
Christoph Lameter81819f02007-05-06 14:49:36 -07002920 */
2921 s->inuse = size;
2922
2923 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
Christoph Lameterc59def92007-05-16 22:10:50 -07002924 s->ctor)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002925 /*
2926 * Relocate free pointer after the object if it is not
2927 * permitted to overwrite the first word of the object on
2928 * kmem_cache_free.
2929 *
2930 * This is the case if we do RCU, have a constructor or
2931 * destructor or are poisoning the objects.
2932 */
2933 s->offset = size;
2934 size += sizeof(void *);
2935 }
2936
Christoph Lameterc12b3c62007-05-23 13:57:31 -07002937#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07002938 if (flags & SLAB_STORE_USER)
2939 /*
2940 * Need to store information about allocs and frees after
2941 * the object.
2942 */
2943 size += 2 * sizeof(struct track);
2944
Christoph Lameterbe7b3fb2007-05-09 02:32:36 -07002945 if (flags & SLAB_RED_ZONE)
Christoph Lameter81819f02007-05-06 14:49:36 -07002946 /*
2947 * Add some empty padding so that we can catch
2948 * overwrites from earlier objects rather than let
2949 * tracking information or the free pointer be
Frederik Schwarzer0211a9c2008-12-29 22:14:56 +01002950 * corrupted if a user writes before the start
Christoph Lameter81819f02007-05-06 14:49:36 -07002951 * of the object.
2952 */
2953 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002954#endif
Christoph Lameter672bba32007-05-09 02:32:39 -07002955
Christoph Lameter81819f02007-05-06 14:49:36 -07002956 /*
2957 * Determine the alignment based on various parameters that the
Christoph Lameter65c02d42007-05-09 02:32:35 -07002958 * user specified and the dynamic determination of cache line size
2959 * on bootup.
Christoph Lameter81819f02007-05-06 14:49:36 -07002960 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002961 align = calculate_alignment(flags, align, s->object_size);
Zhang, Yanmindcb0ce12009-07-30 11:28:11 +08002962 s->align = align;
Christoph Lameter81819f02007-05-06 14:49:36 -07002963
2964 /*
2965 * SLUB stores one object immediately after another beginning from
2966 * offset 0. In order to align the objects we have to simply size
2967 * each object to conform to the alignment.
2968 */
2969 size = ALIGN(size, align);
2970 s->size = size;
Christoph Lameter06b285d2008-04-14 19:11:41 +03002971 if (forced_order >= 0)
2972 order = forced_order;
2973 else
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002974 order = calculate_order(size, s->reserved);
Christoph Lameter81819f02007-05-06 14:49:36 -07002975
Christoph Lameter834f3d12008-04-14 19:11:31 +03002976 if (order < 0)
Christoph Lameter81819f02007-05-06 14:49:36 -07002977 return 0;
2978
Christoph Lameterb7a49f02008-02-14 14:21:32 -08002979 s->allocflags = 0;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002980 if (order)
Christoph Lameterb7a49f02008-02-14 14:21:32 -08002981 s->allocflags |= __GFP_COMP;
2982
2983 if (s->flags & SLAB_CACHE_DMA)
2984 s->allocflags |= SLUB_DMA;
2985
2986 if (s->flags & SLAB_RECLAIM_ACCOUNT)
2987 s->allocflags |= __GFP_RECLAIMABLE;
2988
Christoph Lameter81819f02007-05-06 14:49:36 -07002989 /*
2990 * Determine the number of objects per slab
2991 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002992 s->oo = oo_make(order, size, s->reserved);
2993 s->min = oo_make(get_order(size), size, s->reserved);
Christoph Lameter205ab992008-04-14 19:11:40 +03002994 if (oo_objects(s->oo) > oo_objects(s->max))
2995 s->max = s->oo;
Christoph Lameter81819f02007-05-06 14:49:36 -07002996
Christoph Lameter834f3d12008-04-14 19:11:31 +03002997 return !!oo_objects(s->oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07002998
2999}
3000
Christoph Lameter55136592010-08-20 12:37:13 -05003001static int kmem_cache_open(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07003002 const char *name, size_t size,
3003 size_t align, unsigned long flags,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003004 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003005{
3006 memset(s, 0, kmem_size);
3007 s->name = name;
3008 s->ctor = ctor;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003009 s->object_size = size;
Christoph Lameter81819f02007-05-06 14:49:36 -07003010 s->align = align;
Christoph Lameterba0268a2007-09-11 15:24:11 -07003011 s->flags = kmem_cache_flags(size, flags, name, ctor);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08003012 s->reserved = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07003013
Lai Jiangshanda9a6382011-03-10 15:22:00 +08003014 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3015 s->reserved = sizeof(struct rcu_head);
Christoph Lameter81819f02007-05-06 14:49:36 -07003016
Christoph Lameter06b285d2008-04-14 19:11:41 +03003017 if (!calculate_sizes(s, -1))
Christoph Lameter81819f02007-05-06 14:49:36 -07003018 goto error;
David Rientjes3de47212009-07-27 18:30:35 -07003019 if (disable_higher_order_debug) {
3020 /*
3021 * Disable debugging flags that store metadata if the min slab
3022 * order increased.
3023 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003024 if (get_order(s->size) > get_order(s->object_size)) {
David Rientjes3de47212009-07-27 18:30:35 -07003025 s->flags &= ~DEBUG_METADATA_FLAGS;
3026 s->offset = 0;
3027 if (!calculate_sizes(s, -1))
3028 goto error;
3029 }
3030 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003031
Heiko Carstens25654092012-01-12 17:17:33 -08003032#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3033 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
Christoph Lameterb789ef52011-06-01 12:25:49 -05003034 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3035 /* Enable fast mode */
3036 s->flags |= __CMPXCHG_DOUBLE;
3037#endif
3038
David Rientjes3b89d7d2009-02-22 17:40:07 -08003039 /*
3040 * The larger the object size is, the more pages we want on the partial
3041 * list to avoid pounding the page allocator excessively.
3042 */
Christoph Lameter49e22582011-08-09 16:12:27 -05003043 set_min_partial(s, ilog2(s->size) / 2);
3044
3045 /*
3046 * cpu_partial determines the maximum number of objects kept in the
3047 * per cpu partial lists of a processor.
3048 *
3049 * Per cpu partial lists mainly contain slabs that just have one
3050 * object freed. If they are used for allocation then they can be
3051 * filled up again with minimal effort. The slab will never hit the
3052 * per node partial lists and therefore no locking will be required.
3053 *
3054 * This setting also determines
3055 *
3056 * A) The number of objects from per cpu partial slabs dumped to the
3057 * per node list when we reach the limit.
Alex Shi9f264902011-09-01 11:32:18 +08003058 * B) The number of objects in cpu partial slabs to extract from the
Christoph Lameter49e22582011-08-09 16:12:27 -05003059 * per node list when we run out of per cpu objects. We only fetch 50%
3060 * to keep some capacity around for frees.
3061 */
Christoph Lameter8f1e33d2011-11-23 09:24:27 -06003062 if (kmem_cache_debug(s))
3063 s->cpu_partial = 0;
3064 else if (s->size >= PAGE_SIZE)
Christoph Lameter49e22582011-08-09 16:12:27 -05003065 s->cpu_partial = 2;
3066 else if (s->size >= 1024)
3067 s->cpu_partial = 6;
3068 else if (s->size >= 256)
3069 s->cpu_partial = 13;
3070 else
3071 s->cpu_partial = 30;
3072
Christoph Lameter81819f02007-05-06 14:49:36 -07003073 s->refcount = 1;
3074#ifdef CONFIG_NUMA
Christoph Lametere2cb96b2008-08-19 08:51:22 -05003075 s->remote_node_defrag_ratio = 1000;
Christoph Lameter81819f02007-05-06 14:49:36 -07003076#endif
Christoph Lameter55136592010-08-20 12:37:13 -05003077 if (!init_kmem_cache_nodes(s))
Christoph Lameterdfb4f092007-10-16 01:26:05 -07003078 goto error;
Christoph Lameter81819f02007-05-06 14:49:36 -07003079
Christoph Lameter55136592010-08-20 12:37:13 -05003080 if (alloc_kmem_cache_cpus(s))
Christoph Lameter81819f02007-05-06 14:49:36 -07003081 return 1;
Christoph Lameterff120592009-12-18 16:26:22 -06003082
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003083 free_kmem_cache_nodes(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003084error:
3085 if (flags & SLAB_PANIC)
3086 panic("Cannot create slab %s size=%lu realsize=%u "
3087 "order=%u offset=%u flags=%lx\n",
Christoph Lameter834f3d12008-04-14 19:11:31 +03003088 s->name, (unsigned long)size, s->size, oo_order(s->oo),
Christoph Lameter81819f02007-05-06 14:49:36 -07003089 s->offset, flags);
3090 return 0;
3091}
Christoph Lameter81819f02007-05-06 14:49:36 -07003092
3093/*
Christoph Lameter81819f02007-05-06 14:49:36 -07003094 * Determine the size of a slab object
3095 */
3096unsigned int kmem_cache_size(struct kmem_cache *s)
3097{
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003098 return s->object_size;
Christoph Lameter81819f02007-05-06 14:49:36 -07003099}
3100EXPORT_SYMBOL(kmem_cache_size);
3101
Christoph Lameter33b12c32008-04-25 12:22:43 -07003102static void list_slab_objects(struct kmem_cache *s, struct page *page,
3103 const char *text)
Christoph Lameter81819f02007-05-06 14:49:36 -07003104{
Christoph Lameter33b12c32008-04-25 12:22:43 -07003105#ifdef CONFIG_SLUB_DEBUG
3106 void *addr = page_address(page);
3107 void *p;
Namhyung Kima5dd5c12010-09-29 21:02:13 +09003108 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3109 sizeof(long), GFP_ATOMIC);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003110 if (!map)
3111 return;
Christoph Lameter33b12c32008-04-25 12:22:43 -07003112 slab_err(s, page, "%s", text);
3113 slab_lock(page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003114
Christoph Lameter5f80b132011-04-15 14:48:13 -05003115 get_map(s, page, map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003116 for_each_object(p, s, addr, page->objects) {
3117
3118 if (!test_bit(slab_index(p, s, addr), map)) {
3119 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3120 p, p - addr);
3121 print_tracking(s, p);
3122 }
3123 }
3124 slab_unlock(page);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003125 kfree(map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003126#endif
3127}
3128
Christoph Lameter81819f02007-05-06 14:49:36 -07003129/*
Christoph Lameter599870b2008-04-23 12:36:52 -07003130 * Attempt to free all partial slabs on a node.
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003131 * This is called from kmem_cache_close(). We must be the last thread
3132 * using the cache and therefore we do not need to lock anymore.
Christoph Lameter81819f02007-05-06 14:49:36 -07003133 */
Christoph Lameter599870b2008-04-23 12:36:52 -07003134static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07003135{
Christoph Lameter81819f02007-05-06 14:49:36 -07003136 struct page *page, *h;
3137
Christoph Lameter33b12c32008-04-25 12:22:43 -07003138 list_for_each_entry_safe(page, h, &n->partial, lru) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003139 if (!page->inuse) {
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05003140 remove_partial(n, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07003141 discard_slab(s, page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003142 } else {
3143 list_slab_objects(s, page,
3144 "Objects remaining on kmem_cache_close()");
Christoph Lameter599870b2008-04-23 12:36:52 -07003145 }
Christoph Lameter33b12c32008-04-25 12:22:43 -07003146 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003147}
3148
3149/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003150 * Release all resources used by a slab cache.
Christoph Lameter81819f02007-05-06 14:49:36 -07003151 */
Christoph Lameter0c710012007-07-17 04:03:24 -07003152static inline int kmem_cache_close(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07003153{
3154 int node;
3155
3156 flush_all(s);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003157 free_percpu(s->cpu_slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07003158 /* Attempt to free all objects */
Christoph Lameterf64dc582007-10-16 01:25:33 -07003159 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003160 struct kmem_cache_node *n = get_node(s, node);
3161
Christoph Lameter599870b2008-04-23 12:36:52 -07003162 free_partial(s, n);
3163 if (n->nr_partial || slabs_node(s, node))
Christoph Lameter81819f02007-05-06 14:49:36 -07003164 return 1;
3165 }
3166 free_kmem_cache_nodes(s);
3167 return 0;
3168}
3169
3170/*
3171 * Close a cache and release the kmem_cache structure
3172 * (must be used for caches created using kmem_cache_create)
3173 */
3174void kmem_cache_destroy(struct kmem_cache *s)
3175{
Christoph Lameter18004c52012-07-06 15:25:12 -05003176 mutex_lock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07003177 s->refcount--;
3178 if (!s->refcount) {
3179 list_del(&s->list);
Christoph Lameter18004c52012-07-06 15:25:12 -05003180 mutex_unlock(&slab_mutex);
Pekka Enbergd629d812008-04-23 22:31:08 +03003181 if (kmem_cache_close(s)) {
3182 printk(KERN_ERR "SLUB %s: %s called for cache that "
3183 "still has objects.\n", s->name, __func__);
3184 dump_stack();
3185 }
Eric Dumazetd76b1592009-09-03 22:38:59 +03003186 if (s->flags & SLAB_DESTROY_BY_RCU)
3187 rcu_barrier();
Christoph Lameter81819f02007-05-06 14:49:36 -07003188 sysfs_slab_remove(s);
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003189 } else
Christoph Lameter18004c52012-07-06 15:25:12 -05003190 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07003191}
3192EXPORT_SYMBOL(kmem_cache_destroy);
3193
3194/********************************************************************
3195 * Kmalloc subsystem
3196 *******************************************************************/
3197
Christoph Lameter51df1142010-08-20 12:37:15 -05003198struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
Christoph Lameter81819f02007-05-06 14:49:36 -07003199EXPORT_SYMBOL(kmalloc_caches);
3200
Christoph Lameter51df1142010-08-20 12:37:15 -05003201static struct kmem_cache *kmem_cache;
3202
Christoph Lameter55136592010-08-20 12:37:13 -05003203#ifdef CONFIG_ZONE_DMA
Christoph Lameter51df1142010-08-20 12:37:15 -05003204static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
Christoph Lameter55136592010-08-20 12:37:13 -05003205#endif
3206
Christoph Lameter81819f02007-05-06 14:49:36 -07003207static int __init setup_slub_min_order(char *str)
3208{
Pekka Enberg06428782008-01-07 23:20:27 -08003209 get_option(&str, &slub_min_order);
Christoph Lameter81819f02007-05-06 14:49:36 -07003210
3211 return 1;
3212}
3213
3214__setup("slub_min_order=", setup_slub_min_order);
3215
3216static int __init setup_slub_max_order(char *str)
3217{
Pekka Enberg06428782008-01-07 23:20:27 -08003218 get_option(&str, &slub_max_order);
David Rientjes818cf592009-04-23 09:58:22 +03003219 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07003220
3221 return 1;
3222}
3223
3224__setup("slub_max_order=", setup_slub_max_order);
3225
3226static int __init setup_slub_min_objects(char *str)
3227{
Pekka Enberg06428782008-01-07 23:20:27 -08003228 get_option(&str, &slub_min_objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07003229
3230 return 1;
3231}
3232
3233__setup("slub_min_objects=", setup_slub_min_objects);
3234
3235static int __init setup_slub_nomerge(char *str)
3236{
3237 slub_nomerge = 1;
3238 return 1;
3239}
3240
3241__setup("slub_nomerge", setup_slub_nomerge);
3242
Christoph Lameter51df1142010-08-20 12:37:15 -05003243static struct kmem_cache *__init create_kmalloc_cache(const char *name,
3244 int size, unsigned int flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07003245{
Christoph Lameter51df1142010-08-20 12:37:15 -05003246 struct kmem_cache *s;
3247
3248 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3249
Pekka Enberg83b519e2009-06-10 19:40:04 +03003250 /*
3251 * This function is called with IRQs disabled during early-boot on
Christoph Lameter18004c52012-07-06 15:25:12 -05003252 * a single CPU so there's no need to take slab_mutex here.
Pekka Enberg83b519e2009-06-10 19:40:04 +03003253 */
Christoph Lameter55136592010-08-20 12:37:13 -05003254 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
Christoph Lameter319d1e22008-04-14 19:11:41 +03003255 flags, NULL))
Christoph Lameter81819f02007-05-06 14:49:36 -07003256 goto panic;
3257
3258 list_add(&s->list, &slab_caches);
Christoph Lameter51df1142010-08-20 12:37:15 -05003259 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003260
3261panic:
3262 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
Christoph Lameter51df1142010-08-20 12:37:15 -05003263 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07003264}
3265
Christoph Lameterf1b26332007-07-17 04:03:26 -07003266/*
3267 * Conversion table for small slab sizes / 8 to the index in the
3268 * kmalloc array. This is necessary for slabs < 192 since we have non power
3269 * of two cache sizes there. The size of larger slabs can be determined using
3270 * fls.
3271 */
3272static s8 size_index[24] = {
3273 3, /* 8 */
3274 4, /* 16 */
3275 5, /* 24 */
3276 5, /* 32 */
3277 6, /* 40 */
3278 6, /* 48 */
3279 6, /* 56 */
3280 6, /* 64 */
3281 1, /* 72 */
3282 1, /* 80 */
3283 1, /* 88 */
3284 1, /* 96 */
3285 7, /* 104 */
3286 7, /* 112 */
3287 7, /* 120 */
3288 7, /* 128 */
3289 2, /* 136 */
3290 2, /* 144 */
3291 2, /* 152 */
3292 2, /* 160 */
3293 2, /* 168 */
3294 2, /* 176 */
3295 2, /* 184 */
3296 2 /* 192 */
3297};
3298
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003299static inline int size_index_elem(size_t bytes)
3300{
3301 return (bytes - 1) / 8;
3302}
3303
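/*
 * Example (hypothetical request sizes): kmalloc(100) yields
 * size_index[size_index_elem(100)] = size_index[12] = 7, i.e. the 128 byte
 * cache, while kmalloc(80) maps to index 1, the 96 byte cache. Sizes above
 * 192 bytes fall through to fls(size - 1) in get_slab() below.
 */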
Christoph Lameter81819f02007-05-06 14:49:36 -07003304static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3305{
Christoph Lameterf1b26332007-07-17 04:03:26 -07003306 int index;
Christoph Lameter81819f02007-05-06 14:49:36 -07003307
Christoph Lameterf1b26332007-07-17 04:03:26 -07003308 if (size <= 192) {
3309 if (!size)
3310 return ZERO_SIZE_PTR;
Christoph Lameter81819f02007-05-06 14:49:36 -07003311
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003312 index = size_index[size_index_elem(size)];
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003313 } else
Christoph Lameterf1b26332007-07-17 04:03:26 -07003314 index = fls(size - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07003315
3316#ifdef CONFIG_ZONE_DMA
Christoph Lameterf1b26332007-07-17 04:03:26 -07003317 if (unlikely((flags & SLUB_DMA)))
Christoph Lameter51df1142010-08-20 12:37:15 -05003318 return kmalloc_dma_caches[index];
Christoph Lameterf1b26332007-07-17 04:03:26 -07003319
Christoph Lameter81819f02007-05-06 14:49:36 -07003320#endif
Christoph Lameter51df1142010-08-20 12:37:15 -05003321 return kmalloc_caches[index];
Christoph Lameter81819f02007-05-06 14:49:36 -07003322}
3323
3324void *__kmalloc(size_t size, gfp_t flags)
3325{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003326 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003327 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003328
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003329 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02003330 return kmalloc_large(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003331
3332 s = get_slab(size, flags);
3333
3334 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003335 return s;
3336
Christoph Lameter2154a332010-07-09 14:07:10 -05003337 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003338
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003339 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003340
3341 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003342}
3343EXPORT_SYMBOL(__kmalloc);
3344
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09003345#ifdef CONFIG_NUMA
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003346static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3347{
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003348 struct page *page;
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003349 void *ptr = NULL;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003350
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003351 flags |= __GFP_COMP | __GFP_NOTRACK;
3352 page = alloc_pages_node(node, flags, get_order(size));
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003353 if (page)
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003354 ptr = page_address(page);
3355
3356 kmemleak_alloc(ptr, size, 1, flags);
3357 return ptr;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003358}
3359
Christoph Lameter81819f02007-05-06 14:49:36 -07003360void *__kmalloc_node(size_t size, gfp_t flags, int node)
3361{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003362 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003363 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003364
Ingo Molnar057685c2009-02-20 12:15:30 +01003365 if (unlikely(size > SLUB_MAX_SIZE)) {
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003366 ret = kmalloc_large_node(size, flags, node);
3367
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003368 trace_kmalloc_node(_RET_IP_, ret,
3369 size, PAGE_SIZE << get_order(size),
3370 flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003371
3372 return ret;
3373 }
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003374
3375 s = get_slab(size, flags);
3376
3377 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003378 return s;
3379
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003380 ret = slab_alloc(s, flags, node, _RET_IP_);
3381
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003382 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003383
3384 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003385}
3386EXPORT_SYMBOL(__kmalloc_node);
3387#endif
3388
3389size_t ksize(const void *object)
3390{
Christoph Lameter272c1d22007-06-08 13:46:49 -07003391 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07003392
Christoph Lameteref8b4522007-10-16 01:24:46 -07003393 if (unlikely(object == ZERO_SIZE_PTR))
Christoph Lameter272c1d22007-06-08 13:46:49 -07003394 return 0;
3395
Vegard Nossum294a80a2007-12-04 23:45:30 -08003396 page = virt_to_head_page(object);
Vegard Nossum294a80a2007-12-04 23:45:30 -08003397
Pekka Enberg76994412008-05-22 19:22:25 +03003398 if (unlikely(!PageSlab(page))) {
3399 WARN_ON(!PageCompound(page));
Vegard Nossum294a80a2007-12-04 23:45:30 -08003400 return PAGE_SIZE << compound_order(page);
Pekka Enberg76994412008-05-22 19:22:25 +03003401 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003402
Eric Dumazetb3d41882011-02-14 18:35:22 +01003403 return slab_ksize(page->slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07003404}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02003405EXPORT_SYMBOL(ksize);
Christoph Lameter81819f02007-05-06 14:49:36 -07003406
Ben Greeard18a90d2011-07-07 11:36:37 -07003407#ifdef CONFIG_SLUB_DEBUG
3408bool verify_mem_not_deleted(const void *x)
3409{
3410 struct page *page;
3411 void *object = (void *)x;
3412 unsigned long flags;
3413 bool rv;
3414
3415 if (unlikely(ZERO_OR_NULL_PTR(x)))
3416 return false;
3417
3418 local_irq_save(flags);
3419
3420 page = virt_to_head_page(x);
3421 if (unlikely(!PageSlab(page))) {
3422 /* maybe it was from stack? */
3423 rv = true;
3424 goto out_unlock;
3425 }
3426
3427 slab_lock(page);
3428 if (on_freelist(page->slab, page, object)) {
3429 object_err(page->slab, page, object, "Object is on free-list");
3430 rv = false;
3431 } else {
3432 rv = true;
3433 }
3434 slab_unlock(page);
3435
3436out_unlock:
3437 local_irq_restore(flags);
3438 return rv;
3439}
3440EXPORT_SYMBOL(verify_mem_not_deleted);
3441#endif
3442
Christoph Lameter81819f02007-05-06 14:49:36 -07003443void kfree(const void *x)
3444{
Christoph Lameter81819f02007-05-06 14:49:36 -07003445 struct page *page;
Christoph Lameter5bb983b2008-02-07 17:47:41 -08003446 void *object = (void *)x;
Christoph Lameter81819f02007-05-06 14:49:36 -07003447
Pekka Enberg2121db72009-03-25 11:05:57 +02003448 trace_kfree(_RET_IP_, x);
3449
Satyam Sharma2408c552007-10-16 01:24:44 -07003450 if (unlikely(ZERO_OR_NULL_PTR(x)))
Christoph Lameter81819f02007-05-06 14:49:36 -07003451 return;
3452
Christoph Lameterb49af682007-05-06 14:49:41 -07003453 page = virt_to_head_page(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003454 if (unlikely(!PageSlab(page))) {
Christoph Lameter09375022008-05-28 10:32:22 -07003455 BUG_ON(!PageCompound(page));
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003456 kmemleak_free(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003457 put_page(page);
3458 return;
3459 }
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003460 slab_free(page->slab, page, object, _RET_IP_);
Christoph Lameter81819f02007-05-06 14:49:36 -07003461}
3462EXPORT_SYMBOL(kfree);
3463
Christoph Lameter2086d262007-05-06 14:49:46 -07003464/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003465 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3466 * the remaining slabs by the number of items in use. The slabs with the
3467 * most items in use come first. New allocations will then fill those up
3468 * and thus they can be removed from the partial lists.
3469 *
3470 * The slabs with the least items are placed last. This results in them
3471 * being allocated from last, increasing the chance that the last objects
3472 * are freed in them.
Christoph Lameter2086d262007-05-06 14:49:46 -07003473 */
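/*
 * A sketch of the pass below: partial slabs are sorted into buckets
 * indexed by page->inuse (bucket 0 collects the completely empty ones),
 * the buckets are spliced back with the fullest slabs at the head of the
 * partial list, and whatever remains in bucket 0 is freed via
 * discard_slab().
 */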
3474int kmem_cache_shrink(struct kmem_cache *s)
3475{
3476 int node;
3477 int i;
3478 struct kmem_cache_node *n;
3479 struct page *page;
3480 struct page *t;
Christoph Lameter205ab992008-04-14 19:11:40 +03003481 int objects = oo_objects(s->max);
Christoph Lameter2086d262007-05-06 14:49:46 -07003482 struct list_head *slabs_by_inuse =
Christoph Lameter834f3d12008-04-14 19:11:31 +03003483 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
Christoph Lameter2086d262007-05-06 14:49:46 -07003484 unsigned long flags;
3485
3486 if (!slabs_by_inuse)
3487 return -ENOMEM;
3488
3489 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07003490 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter2086d262007-05-06 14:49:46 -07003491 n = get_node(s, node);
3492
3493 if (!n->nr_partial)
3494 continue;
3495
Christoph Lameter834f3d12008-04-14 19:11:31 +03003496 for (i = 0; i < objects; i++)
Christoph Lameter2086d262007-05-06 14:49:46 -07003497 INIT_LIST_HEAD(slabs_by_inuse + i);
3498
3499 spin_lock_irqsave(&n->list_lock, flags);
3500
3501 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003502 * Build lists indexed by the items in use in each slab.
Christoph Lameter2086d262007-05-06 14:49:46 -07003503 *
Christoph Lameter672bba32007-05-09 02:32:39 -07003504 * Note that concurrent frees may occur while we hold the
3505 * list_lock. page->inuse here is the upper limit.
Christoph Lameter2086d262007-05-06 14:49:46 -07003506 */
3507 list_for_each_entry_safe(page, t, &n->partial, lru) {
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003508 list_move(&page->lru, slabs_by_inuse + page->inuse);
3509 if (!page->inuse)
3510 n->nr_partial--;
Christoph Lameter2086d262007-05-06 14:49:46 -07003511 }
3512
Christoph Lameter2086d262007-05-06 14:49:46 -07003513 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003514 * Rebuild the partial list with the slabs filled up most
3515 * first and the least used slabs at the end.
Christoph Lameter2086d262007-05-06 14:49:46 -07003516 */
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003517 for (i = objects - 1; i > 0; i--)
Christoph Lameter2086d262007-05-06 14:49:46 -07003518 list_splice(slabs_by_inuse + i, n->partial.prev);
3519
Christoph Lameter2086d262007-05-06 14:49:46 -07003520 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003521
3522 /* Release empty slabs */
3523 list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3524 discard_slab(s, page);
Christoph Lameter2086d262007-05-06 14:49:46 -07003525 }
3526
3527 kfree(slabs_by_inuse);
3528 return 0;
3529}
3530EXPORT_SYMBOL(kmem_cache_shrink);
3531
Pekka Enberg92a5bbc2010-10-06 16:58:16 +03003532#if defined(CONFIG_MEMORY_HOTPLUG)
Yasunori Gotob9049e22007-10-21 16:41:37 -07003533static int slab_mem_going_offline_callback(void *arg)
3534{
3535 struct kmem_cache *s;
3536
Christoph Lameter18004c52012-07-06 15:25:12 -05003537 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003538 list_for_each_entry(s, &slab_caches, list)
3539 kmem_cache_shrink(s);
Christoph Lameter18004c52012-07-06 15:25:12 -05003540 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003541
3542 return 0;
3543}
3544
3545static void slab_mem_offline_callback(void *arg)
3546{
3547 struct kmem_cache_node *n;
3548 struct kmem_cache *s;
3549 struct memory_notify *marg = arg;
3550 int offline_node;
3551
3552 offline_node = marg->status_change_nid;
3553
3554 /*
3555 * If the node still has available memory, we still need its
3556 * kmem_cache_node, so there is nothing to do here.
3557 */
3558 if (offline_node < 0)
3559 return;
3560
Christoph Lameter18004c52012-07-06 15:25:12 -05003561 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003562 list_for_each_entry(s, &slab_caches, list) {
3563 n = get_node(s, offline_node);
3564 if (n) {
3565 /*
3566 * if n->nr_slabs > 0, slabs still exist on the node
3567 * that is going down. We were unable to free them,
Adam Buchbinderc9404c92009-12-18 15:40:42 -05003568 * and the offline_pages() function shouldn't call this
Yasunori Gotob9049e22007-10-21 16:41:37 -07003569 * callback. So, we must fail.
3570 */
Christoph Lameter0f389ec2008-04-14 18:53:02 +03003571 BUG_ON(slabs_node(s, offline_node));
Yasunori Gotob9049e22007-10-21 16:41:37 -07003572
3573 s->node[offline_node] = NULL;
Christoph Lameter8de66a02010-08-25 14:51:14 -05003574 kmem_cache_free(kmem_cache_node, n);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003575 }
3576 }
Christoph Lameter18004c52012-07-06 15:25:12 -05003577 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003578}
3579
3580static int slab_mem_going_online_callback(void *arg)
3581{
3582 struct kmem_cache_node *n;
3583 struct kmem_cache *s;
3584 struct memory_notify *marg = arg;
3585 int nid = marg->status_change_nid;
3586 int ret = 0;
3587
3588 /*
3589 * If the node's memory is already available, then kmem_cache_node is
3590 * already created. Nothing to do.
3591 */
3592 if (nid < 0)
3593 return 0;
3594
3595 /*
Christoph Lameter0121c6192008-04-29 16:11:12 -07003596 * We are bringing a node online. No memory is available yet. We must
Yasunori Gotob9049e22007-10-21 16:41:37 -07003597 * allocate a kmem_cache_node structure in order to bring the node
3598 * online.
3599 */
Christoph Lameter18004c52012-07-06 15:25:12 -05003600 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003601 list_for_each_entry(s, &slab_caches, list) {
3602 /*
3603 * XXX: kmem_cache_alloc_node will fall back to other nodes
3604 * since memory is not yet available from the node that
3605 * is brought up.
3606 */
Christoph Lameter8de66a02010-08-25 14:51:14 -05003607 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003608 if (!n) {
3609 ret = -ENOMEM;
3610 goto out;
3611 }
Joonsoo Kim40534972012-05-11 00:50:47 +09003612 init_kmem_cache_node(n);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003613 s->node[nid] = n;
3614 }
3615out:
Christoph Lameter18004c52012-07-06 15:25:12 -05003616 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003617 return ret;
3618}
3619
3620static int slab_memory_callback(struct notifier_block *self,
3621 unsigned long action, void *arg)
3622{
3623 int ret = 0;
3624
3625 switch (action) {
3626 case MEM_GOING_ONLINE:
3627 ret = slab_mem_going_online_callback(arg);
3628 break;
3629 case MEM_GOING_OFFLINE:
3630 ret = slab_mem_going_offline_callback(arg);
3631 break;
3632 case MEM_OFFLINE:
3633 case MEM_CANCEL_ONLINE:
3634 slab_mem_offline_callback(arg);
3635 break;
3636 case MEM_ONLINE:
3637 case MEM_CANCEL_OFFLINE:
3638 break;
3639 }
KAMEZAWA Hiroyukidc19f9d2008-12-01 13:13:48 -08003640 if (ret)
3641 ret = notifier_from_errno(ret);
3642 else
3643 ret = NOTIFY_OK;
Yasunori Gotob9049e22007-10-21 16:41:37 -07003644 return ret;
3645}
3646
3647#endif /* CONFIG_MEMORY_HOTPLUG */
3648
Christoph Lameter81819f02007-05-06 14:49:36 -07003649/********************************************************************
3650 * Basic setup of slabs
3651 *******************************************************************/
3652
Christoph Lameter51df1142010-08-20 12:37:15 -05003653/*
3654 * Used for early kmem_cache structures that were allocated using
3655 * the page allocator
3656 */
3657
3658static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3659{
3660 int node;
3661
3662 list_add(&s->list, &slab_caches);
3663 s->refcount = -1;
3664
3665 for_each_node_state(node, N_NORMAL_MEMORY) {
3666 struct kmem_cache_node *n = get_node(s, node);
3667 struct page *p;
3668
3669 if (n) {
3670 list_for_each_entry(p, &n->partial, lru)
3671 p->slab = s;
3672
Li Zefan607bf322011-04-12 15:22:26 +08003673#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter51df1142010-08-20 12:37:15 -05003674 list_for_each_entry(p, &n->full, lru)
3675 p->slab = s;
3676#endif
3677 }
3678 }
3679}
3680
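/*
 * Bootstrap sequence: kmem_cache and kmem_cache_node are first carved out
 * of pages taken directly from the page allocator, used to get the
 * allocator minimally working, and then re-allocated from the now working
 * kmem_cache. kmem_cache_bootstrap_fixup() repoints the page->slab back
 * pointers of slabs allocated in the interim to the final structures.
 */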
Christoph Lameter81819f02007-05-06 14:49:36 -07003681void __init kmem_cache_init(void)
3682{
3683 int i;
Christoph Lameter4b356be2007-06-16 10:16:13 -07003684 int caches = 0;
Christoph Lameter51df1142010-08-20 12:37:15 -05003685 struct kmem_cache *temp_kmem_cache;
3686 int order;
Christoph Lameter51df1142010-08-20 12:37:15 -05003687 struct kmem_cache *temp_kmem_cache_node;
3688 unsigned long kmalloc_size;
3689
Stanislaw Gruszkafc8d8622012-01-10 15:07:32 -08003690 if (debug_guardpage_minorder())
3691 slub_max_order = 0;
3692
Christoph Lameter51df1142010-08-20 12:37:15 -05003693 kmem_size = offsetof(struct kmem_cache, node) +
3694 nr_node_ids * sizeof(struct kmem_cache_node *);
3695
3696 /* Allocate two kmem_caches from the page allocator */
3697 kmalloc_size = ALIGN(kmem_size, cache_line_size());
3698 order = get_order(2 * kmalloc_size);
3699 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3700
Christoph Lameter81819f02007-05-06 14:49:36 -07003701 /*
3702 * Must first have the slab cache available for the allocations of the
Christoph Lameter672bba32007-05-09 02:32:39 -07003703 * struct kmem_cache_node's. There is special bootstrap code in
Christoph Lameter81819f02007-05-06 14:49:36 -07003704 * kmem_cache_open for slab_state == DOWN.
3705 */
Christoph Lameter51df1142010-08-20 12:37:15 -05003706 kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3707
3708 kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3709 sizeof(struct kmem_cache_node),
3710 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003711
Nadia Derbey0c40ba42008-04-29 01:00:41 -07003712 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
Christoph Lameter81819f02007-05-06 14:49:36 -07003713
3714 /* Able to allocate the per node structures */
3715 slab_state = PARTIAL;
3716
Christoph Lameter51df1142010-08-20 12:37:15 -05003717 temp_kmem_cache = kmem_cache;
3718 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3719 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3720 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3721 memcpy(kmem_cache, temp_kmem_cache, kmem_size);
Christoph Lameter81819f02007-05-06 14:49:36 -07003722
Christoph Lameter51df1142010-08-20 12:37:15 -05003723 /*
3724 * Allocate kmem_cache_node properly from the kmem_cache slab.
3725 * kmem_cache_node is separately allocated so no need to
3726 * update any list pointers.
3727 */
3728 temp_kmem_cache_node = kmem_cache_node;
Christoph Lameter81819f02007-05-06 14:49:36 -07003729
Christoph Lameter51df1142010-08-20 12:37:15 -05003730 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3731 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3732
3733 kmem_cache_bootstrap_fixup(kmem_cache_node);
3734
3735 caches++;
Christoph Lameter51df1142010-08-20 12:37:15 -05003736 kmem_cache_bootstrap_fixup(kmem_cache);
3737 caches++;
3738 /* Free temporary boot structure */
3739 free_pages((unsigned long)temp_kmem_cache, order);
3740
3741 /* Now we can use the kmem_cache to allocate kmalloc slabs */
Christoph Lameterf1b26332007-07-17 04:03:26 -07003742
3743 /*
3744 * Patch up the size_index table if we have strange large alignment
3745 * requirements for the kmalloc array. This is only the case for
Christoph Lameter6446faa2008-02-15 23:45:26 -08003746	 * MIPS, it seems. The standard arches will not generate any code here.
Christoph Lameterf1b26332007-07-17 04:03:26 -07003747 *
3748 * Largest permitted alignment is 256 bytes due to the way we
3749 * handle the index determination for the smaller caches.
3750 *
3751 * Make sure that nothing crazy happens if someone starts tinkering
3752 * around with ARCH_KMALLOC_MINALIGN
3753 */
3754 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3755 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3756
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003757 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3758 int elem = size_index_elem(i);
3759 if (elem >= ARRAY_SIZE(size_index))
3760 break;
3761 size_index[elem] = KMALLOC_SHIFT_LOW;
3762 }
Christoph Lameterf1b26332007-07-17 04:03:26 -07003763
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003764 if (KMALLOC_MIN_SIZE == 64) {
3765 /*
 3766		 * The 96 byte sized cache is not used if the alignment
 3767		 * is 64 bytes.
3768 */
3769 for (i = 64 + 8; i <= 96; i += 8)
3770 size_index[size_index_elem(i)] = 7;
3771 } else if (KMALLOC_MIN_SIZE == 128) {
Christoph Lameter41d54d32008-07-03 09:14:26 -05003772 /*
3773 * The 192 byte sized cache is not used if the alignment
 3774		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3775 * instead.
3776 */
3777 for (i = 128 + 8; i <= 192; i += 8)
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003778 size_index[size_index_elem(i)] = 8;
Christoph Lameter41d54d32008-07-03 09:14:26 -05003779 }
3780
Christoph Lameter51df1142010-08-20 12:37:15 -05003781	/* Caches that are not a power-of-two size */
3782 if (KMALLOC_MIN_SIZE <= 32) {
3783 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3784 caches++;
3785 }
3786
3787 if (KMALLOC_MIN_SIZE <= 64) {
3788 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3789 caches++;
3790 }
3791
3792 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3793 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3794 caches++;
3795 }
3796
Christoph Lameter81819f02007-05-06 14:49:36 -07003797 slab_state = UP;
3798
3799 /* Provide the correct kmalloc names now that the caches are up */
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003800 if (KMALLOC_MIN_SIZE <= 32) {
3801 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3802 BUG_ON(!kmalloc_caches[1]->name);
3803 }
3804
3805 if (KMALLOC_MIN_SIZE <= 64) {
3806 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3807 BUG_ON(!kmalloc_caches[2]->name);
3808 }
3809
Christoph Lameterd7278bd2010-07-09 14:07:12 -05003810 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3811 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3812
3813 BUG_ON(!s);
Christoph Lameter51df1142010-08-20 12:37:15 -05003814 kmalloc_caches[i]->name = s;
Christoph Lameterd7278bd2010-07-09 14:07:12 -05003815 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003816
3817#ifdef CONFIG_SMP
3818 register_cpu_notifier(&slab_notifier);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003819#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003820
Christoph Lameter55136592010-08-20 12:37:13 -05003821#ifdef CONFIG_ZONE_DMA
Christoph Lameter51df1142010-08-20 12:37:15 -05003822 for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3823 struct kmem_cache *s = kmalloc_caches[i];
Christoph Lameter55136592010-08-20 12:37:13 -05003824
Christoph Lameter51df1142010-08-20 12:37:15 -05003825 if (s && s->size) {
Christoph Lameter55136592010-08-20 12:37:13 -05003826 char *name = kasprintf(GFP_NOWAIT,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003827 "dma-kmalloc-%d", s->object_size);
Christoph Lameter55136592010-08-20 12:37:13 -05003828
3829 BUG_ON(!name);
Christoph Lameter51df1142010-08-20 12:37:15 -05003830 kmalloc_dma_caches[i] = create_kmalloc_cache(name,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003831 s->object_size, SLAB_CACHE_DMA);
Christoph Lameter55136592010-08-20 12:37:13 -05003832 }
3833 }
3834#endif
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003835 printk(KERN_INFO
3836 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
Christoph Lameter4b356be2007-06-16 10:16:13 -07003837 " CPUs=%d, Nodes=%d\n",
3838 caches, cache_line_size(),
Christoph Lameter81819f02007-05-06 14:49:36 -07003839 slub_min_order, slub_max_order, slub_min_objects,
3840 nr_cpu_ids, nr_node_ids);
3841}
3842
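/* SLUB needs no late initialization; this hook exists for the common slab init sequence. */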
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003843void __init kmem_cache_init_late(void)
3844{
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003845}
3846
Christoph Lameter81819f02007-05-06 14:49:36 -07003847/*
3848 * Find a mergeable slab cache
3849 */
3850static int slab_unmergeable(struct kmem_cache *s)
3851{
3852 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3853 return 1;
3854
Christoph Lameterc59def92007-05-16 22:10:50 -07003855 if (s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003856 return 1;
3857
Christoph Lameter8ffa6872007-05-31 00:40:51 -07003858 /*
3859 * We may have set a slab to be unmergeable during bootstrap.
3860 */
3861 if (s->refcount < 0)
3862 return 1;
3863
Christoph Lameter81819f02007-05-06 14:49:36 -07003864 return 0;
3865}
3866
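/*
 * A cache can only be merged if it has no constructor, is not marked
 * SLUB_NEVER_MERGE, agrees on the SLUB_MERGE_SAME debug flags, is large
 * enough for the requested size with less than sizeof(void *) of slack,
 * and has a compatible alignment.
 */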
3867static struct kmem_cache *find_mergeable(size_t size,
Christoph Lameterba0268a2007-09-11 15:24:11 -07003868 size_t align, unsigned long flags, const char *name,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003869 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003870{
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003871 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003872
3873 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3874 return NULL;
3875
Christoph Lameterc59def92007-05-16 22:10:50 -07003876 if (ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003877 return NULL;
3878
3879 size = ALIGN(size, sizeof(void *));
3880 align = calculate_alignment(flags, align, size);
3881 size = ALIGN(size, align);
Christoph Lameterba0268a2007-09-11 15:24:11 -07003882 flags = kmem_cache_flags(size, flags, name, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07003883
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003884 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003885 if (slab_unmergeable(s))
3886 continue;
3887
3888 if (size > s->size)
3889 continue;
3890
Christoph Lameterba0268a2007-09-11 15:24:11 -07003891 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
Christoph Lameter81819f02007-05-06 14:49:36 -07003892 continue;
3893 /*
3894 * Check if alignment is compatible.
3895 * Courtesy of Adrian Drzewiecki
3896 */
Pekka Enberg06428782008-01-07 23:20:27 -08003897 if ((s->size & ~(align - 1)) != s->size)
Christoph Lameter81819f02007-05-06 14:49:36 -07003898 continue;
3899
3900 if (s->size - size >= sizeof(void *))
3901 continue;
3902
3903 return s;
3904 }
3905 return NULL;
3906}
3907
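/*
 * Create a cache or alias an existing one: a mergeable cache is reused
 * with its refcount bumped and a sysfs alias added; otherwise a new
 * kmem_cache is allocated and opened, and slab_mutex is temporarily
 * dropped around sysfs_slab_add().
 */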
Christoph Lameter039363f2012-07-06 15:25:10 -05003908struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003909 size_t align, unsigned long flags, void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003910{
3911 struct kmem_cache *s;
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003912 char *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07003913
Christoph Lameterba0268a2007-09-11 15:24:11 -07003914 s = find_mergeable(size, align, flags, name, ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07003915 if (s) {
3916 s->refcount++;
3917 /*
3918 * Adjust the object sizes so that we clear
3919 * the complete object on kzalloc.
3920 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003921 s->object_size = max(s->object_size, (int)size);
Christoph Lameter81819f02007-05-06 14:49:36 -07003922 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
Christoph Lameter6446faa2008-02-15 23:45:26 -08003923
David Rientjes7b8f3b62008-12-17 22:09:46 -08003924 if (sysfs_slab_alias(s, name)) {
David Rientjes7b8f3b62008-12-17 22:09:46 -08003925 s->refcount--;
Christoph Lameter20cea962012-07-06 15:25:13 -05003926 return NULL;
David Rientjes7b8f3b62008-12-17 22:09:46 -08003927 }
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003928 return s;
3929 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08003930
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003931 n = kstrdup(name, GFP_KERNEL);
3932 if (!n)
Christoph Lameter20cea962012-07-06 15:25:13 -05003933 return NULL;
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003934
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003935 s = kmalloc(kmem_size, GFP_KERNEL);
3936 if (s) {
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003937 if (kmem_cache_open(s, n,
Christoph Lameterc59def92007-05-16 22:10:50 -07003938 size, align, flags, ctor)) {
Christoph Lameter20cea962012-07-06 15:25:13 -05003939 int r;
3940
Christoph Lameter81819f02007-05-06 14:49:36 -07003941 list_add(&s->list, &slab_caches);
Christoph Lameter18004c52012-07-06 15:25:12 -05003942 mutex_unlock(&slab_mutex);
Christoph Lameter20cea962012-07-06 15:25:13 -05003943 r = sysfs_slab_add(s);
3944 mutex_lock(&slab_mutex);
3945
3946 if (!r)
3947 return s;
3948
3949 list_del(&s->list);
3950 kmem_cache_close(s);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003951 }
3952 kfree(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003953 }
Joonsoo Kim601d39d2012-05-11 00:32:59 +09003954 kfree(n);
Christoph Lameter20cea962012-07-06 15:25:13 -05003955 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07003956}
Christoph Lameter81819f02007-05-06 14:49:36 -07003957
Christoph Lameter81819f02007-05-06 14:49:36 -07003958#ifdef CONFIG_SMP
Christoph Lameter27390bc2007-06-01 00:47:09 -07003959/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003960 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3961 * necessary.
Christoph Lameter81819f02007-05-06 14:49:36 -07003962 */
3963static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3964 unsigned long action, void *hcpu)
3965{
3966 long cpu = (long)hcpu;
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003967 struct kmem_cache *s;
3968 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07003969
3970 switch (action) {
3971 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003972 case CPU_UP_CANCELED_FROZEN:
Christoph Lameter81819f02007-05-06 14:49:36 -07003973 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003974 case CPU_DEAD_FROZEN:
Christoph Lameter18004c52012-07-06 15:25:12 -05003975 mutex_lock(&slab_mutex);
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003976 list_for_each_entry(s, &slab_caches, list) {
3977 local_irq_save(flags);
3978 __flush_cpu_slab(s, cpu);
3979 local_irq_restore(flags);
3980 }
Christoph Lameter18004c52012-07-06 15:25:12 -05003981 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07003982 break;
3983 default:
3984 break;
3985 }
3986 return NOTIFY_OK;
3987}
3988
Pekka Enberg06428782008-01-07 23:20:27 -08003989static struct notifier_block __cpuinitdata slab_notifier = {
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003990 .notifier_call = slab_cpuup_callback
Pekka Enberg06428782008-01-07 23:20:27 -08003991};
Christoph Lameter81819f02007-05-06 14:49:36 -07003992
3993#endif
3994
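/*
 * kmalloc variants that take an explicit caller address so that tracing
 * and the allocation tracking debug code attribute the allocation to the
 * real call site rather than to this wrapper.
 */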
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003995void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07003996{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003997 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003998 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003999
Christoph Lameterffadd4d2009-02-17 12:05:07 -05004000 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02004001 return kmalloc_large(size, gfpflags);
4002
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004003 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07004004
Satyam Sharma2408c552007-10-16 01:24:44 -07004005 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004006 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004007
Christoph Lameter2154a332010-07-09 14:07:10 -05004008 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004009
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004010 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004011 trace_kmalloc(caller, ret, size, s->size, gfpflags);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004012
4013 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004014}
4015
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004016#ifdef CONFIG_NUMA
Christoph Lameter81819f02007-05-06 14:49:36 -07004017void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004018 int node, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07004019{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004020 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004021 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004022
Xiaotian Fengd3e14aa2010-04-08 17:26:44 +08004023 if (unlikely(size > SLUB_MAX_SIZE)) {
4024 ret = kmalloc_large_node(size, gfpflags, node);
4025
4026 trace_kmalloc_node(caller, ret,
4027 size, PAGE_SIZE << get_order(size),
4028 gfpflags, node);
4029
4030 return ret;
4031 }
Pekka Enbergeada35e2008-02-11 22:47:46 +02004032
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004033 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07004034
Satyam Sharma2408c552007-10-16 01:24:44 -07004035 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004036 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004037
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004038 ret = slab_alloc(s, gfpflags, node, caller);
4039
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004040 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004041 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004042
4043 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004044}
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004045#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004046
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004047#ifdef CONFIG_SYSFS
Christoph Lameter205ab992008-04-14 19:11:40 +03004048static int count_inuse(struct page *page)
4049{
4050 return page->inuse;
4051}
4052
4053static int count_total(struct page *page)
4054{
4055 return page->objects;
4056}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004057#endif
Christoph Lameter205ab992008-04-14 19:11:40 +03004058
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004059#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter434e2452007-07-17 04:03:30 -07004060static int validate_slab(struct kmem_cache *s, struct page *page,
4061 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004062{
4063 void *p;
Christoph Lametera973e9d2008-03-01 13:40:44 -08004064 void *addr = page_address(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004065
4066 if (!check_slab(s, page) ||
4067 !on_freelist(s, page, NULL))
4068 return 0;
4069
4070 /* Now we know that a valid freelist exists */
Christoph Lameter39b26462008-04-14 19:11:30 +03004071 bitmap_zero(map, page->objects);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004072
Christoph Lameter5f80b132011-04-15 14:48:13 -05004073 get_map(s, page, map);
4074 for_each_object(p, s, addr, page->objects) {
4075 if (test_bit(slab_index(p, s, addr), map))
4076 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4077 return 0;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004078 }
4079
Christoph Lameter224a88b2008-04-14 19:11:31 +03004080 for_each_object(p, s, addr, page->objects)
Christoph Lameter7656c722007-05-09 02:32:40 -07004081 if (!test_bit(slab_index(p, s, addr), map))
Tero Roponen37d57442010-12-01 20:04:20 +02004082 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
Christoph Lameter53e15af2007-05-06 14:49:43 -07004083 return 0;
4084 return 1;
4085}
4086
Christoph Lameter434e2452007-07-17 04:03:30 -07004087static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4088 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004089{
Christoph Lameter881db7f2011-06-01 12:25:53 -05004090 slab_lock(page);
4091 validate_slab(s, page, map);
4092 slab_unlock(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004093}
4094
Christoph Lameter434e2452007-07-17 04:03:30 -07004095static int validate_slab_node(struct kmem_cache *s,
4096 struct kmem_cache_node *n, unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004097{
4098 unsigned long count = 0;
4099 struct page *page;
4100 unsigned long flags;
4101
4102 spin_lock_irqsave(&n->list_lock, flags);
4103
4104 list_for_each_entry(page, &n->partial, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07004105 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004106 count++;
4107 }
4108 if (count != n->nr_partial)
4109 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
4110 "counter=%ld\n", s->name, count, n->nr_partial);
4111
4112 if (!(s->flags & SLAB_STORE_USER))
4113 goto out;
4114
4115 list_for_each_entry(page, &n->full, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07004116 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004117 count++;
4118 }
4119 if (count != atomic_long_read(&n->nr_slabs))
4120 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
4121 "counter=%ld\n", s->name, count,
4122 atomic_long_read(&n->nr_slabs));
4123
4124out:
4125 spin_unlock_irqrestore(&n->list_lock, flags);
4126 return count;
4127}
4128
Christoph Lameter434e2452007-07-17 04:03:30 -07004129static long validate_slab_cache(struct kmem_cache *s)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004130{
4131 int node;
4132 unsigned long count = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03004133 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
Christoph Lameter434e2452007-07-17 04:03:30 -07004134 sizeof(unsigned long), GFP_KERNEL);
4135
4136 if (!map)
4137 return -ENOMEM;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004138
4139 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07004140 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter53e15af2007-05-06 14:49:43 -07004141 struct kmem_cache_node *n = get_node(s, node);
4142
Christoph Lameter434e2452007-07-17 04:03:30 -07004143 count += validate_slab_node(s, n, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004144 }
Christoph Lameter434e2452007-07-17 04:03:30 -07004145 kfree(map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004146 return count;
4147}
Christoph Lameter88a420e2007-05-06 14:49:45 -07004148/*
Christoph Lameter672bba32007-05-09 02:32:39 -07004149 * Generate lists of code addresses where slabcache objects are allocated
Christoph Lameter88a420e2007-05-06 14:49:45 -07004150 * and freed.
4151 */
4152
4153struct location {
4154 unsigned long count;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004155 unsigned long addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004156 long long sum_time;
4157 long min_time;
4158 long max_time;
4159 long min_pid;
4160 long max_pid;
Rusty Russell174596a2009-01-01 10:12:29 +10304161 DECLARE_BITMAP(cpus, NR_CPUS);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004162 nodemask_t nodes;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004163};
4164
4165struct loc_track {
4166 unsigned long max;
4167 unsigned long count;
4168 struct location *loc;
4169};
4170
4171static void free_loc_track(struct loc_track *t)
4172{
4173 if (t->max)
4174 free_pages((unsigned long)t->loc,
4175 get_order(sizeof(struct location) * t->max));
4176}
4177
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004178static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004179{
4180 struct location *l;
4181 int order;
4182
Christoph Lameter88a420e2007-05-06 14:49:45 -07004183 order = get_order(sizeof(struct location) * max);
4184
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004185 l = (void *)__get_free_pages(flags, order);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004186 if (!l)
4187 return 0;
4188
4189 if (t->count) {
4190 memcpy(l, t->loc, sizeof(struct location) * t->count);
4191 free_loc_track(t);
4192 }
4193 t->max = max;
4194 t->loc = l;
4195 return 1;
4196}
4197
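/*
 * Record one tracking entry: the loc array is kept sorted by call site
 * address, so binary search for track->addr, fold the statistics into an
 * existing entry on a match, otherwise grow the array (doubling it) when
 * full and insert a new entry at the found position.
 */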
4198static int add_location(struct loc_track *t, struct kmem_cache *s,
Christoph Lameter45edfa52007-05-09 02:32:45 -07004199 const struct track *track)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004200{
4201 long start, end, pos;
4202 struct location *l;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004203 unsigned long caddr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004204 unsigned long age = jiffies - track->when;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004205
4206 start = -1;
4207 end = t->count;
4208
4209 for ( ; ; ) {
4210 pos = start + (end - start + 1) / 2;
4211
4212 /*
4213 * There is nothing at "end". If we end up there
 4214	 * we need to add something before end.
4215 */
4216 if (pos == end)
4217 break;
4218
4219 caddr = t->loc[pos].addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004220 if (track->addr == caddr) {
4221
4222 l = &t->loc[pos];
4223 l->count++;
4224 if (track->when) {
4225 l->sum_time += age;
4226 if (age < l->min_time)
4227 l->min_time = age;
4228 if (age > l->max_time)
4229 l->max_time = age;
4230
4231 if (track->pid < l->min_pid)
4232 l->min_pid = track->pid;
4233 if (track->pid > l->max_pid)
4234 l->max_pid = track->pid;
4235
Rusty Russell174596a2009-01-01 10:12:29 +10304236 cpumask_set_cpu(track->cpu,
4237 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004238 }
4239 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004240 return 1;
4241 }
4242
Christoph Lameter45edfa52007-05-09 02:32:45 -07004243 if (track->addr < caddr)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004244 end = pos;
4245 else
4246 start = pos;
4247 }
4248
4249 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07004250 * Not found. Insert new tracking element.
Christoph Lameter88a420e2007-05-06 14:49:45 -07004251 */
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004252 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
Christoph Lameter88a420e2007-05-06 14:49:45 -07004253 return 0;
4254
4255 l = t->loc + pos;
4256 if (pos < t->count)
4257 memmove(l + 1, l,
4258 (t->count - pos) * sizeof(struct location));
4259 t->count++;
4260 l->count = 1;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004261 l->addr = track->addr;
4262 l->sum_time = age;
4263 l->min_time = age;
4264 l->max_time = age;
4265 l->min_pid = track->pid;
4266 l->max_pid = track->pid;
Rusty Russell174596a2009-01-01 10:12:29 +10304267 cpumask_clear(to_cpumask(l->cpus));
4268 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004269 nodes_clear(l->nodes);
4270 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004271 return 1;
4272}
4273
4274static void process_slab(struct loc_track *t, struct kmem_cache *s,
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004275 struct page *page, enum track_item alloc,
Namhyung Kima5dd5c12010-09-29 21:02:13 +09004276 unsigned long *map)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004277{
Christoph Lametera973e9d2008-03-01 13:40:44 -08004278 void *addr = page_address(page);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004279 void *p;
4280
Christoph Lameter39b26462008-04-14 19:11:30 +03004281 bitmap_zero(map, page->objects);
Christoph Lameter5f80b132011-04-15 14:48:13 -05004282 get_map(s, page, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004283
Christoph Lameter224a88b2008-04-14 19:11:31 +03004284 for_each_object(p, s, addr, page->objects)
Christoph Lameter45edfa52007-05-09 02:32:45 -07004285 if (!test_bit(slab_index(p, s, addr), map))
4286 add_location(t, s, get_track(s, p, alloc));
Christoph Lameter88a420e2007-05-06 14:49:45 -07004287}
4288
4289static int list_locations(struct kmem_cache *s, char *buf,
4290 enum track_item alloc)
4291{
Harvey Harrisone374d482008-01-31 15:20:50 -08004292 int len = 0;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004293 unsigned long i;
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004294 struct loc_track t = { 0, 0, NULL };
Christoph Lameter88a420e2007-05-06 14:49:45 -07004295 int node;
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004296 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4297 sizeof(unsigned long), GFP_KERNEL);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004298
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004299 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4300 GFP_TEMPORARY)) {
4301 kfree(map);
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004302 return sprintf(buf, "Out of memory\n");
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004303 }
Christoph Lameter88a420e2007-05-06 14:49:45 -07004304 /* Push back cpu slabs */
4305 flush_all(s);
4306
Christoph Lameterf64dc582007-10-16 01:25:33 -07004307 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter88a420e2007-05-06 14:49:45 -07004308 struct kmem_cache_node *n = get_node(s, node);
4309 unsigned long flags;
4310 struct page *page;
4311
Christoph Lameter9e869432007-08-22 14:01:56 -07004312 if (!atomic_long_read(&n->nr_slabs))
Christoph Lameter88a420e2007-05-06 14:49:45 -07004313 continue;
4314
4315 spin_lock_irqsave(&n->list_lock, flags);
4316 list_for_each_entry(page, &n->partial, lru)
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004317 process_slab(&t, s, page, alloc, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004318 list_for_each_entry(page, &n->full, lru)
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004319 process_slab(&t, s, page, alloc, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004320 spin_unlock_irqrestore(&n->list_lock, flags);
4321 }
4322
4323 for (i = 0; i < t.count; i++) {
Christoph Lameter45edfa52007-05-09 02:32:45 -07004324 struct location *l = &t.loc[i];
Christoph Lameter88a420e2007-05-06 14:49:45 -07004325
Hugh Dickins9c246242008-12-09 13:14:27 -08004326 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004327 break;
Harvey Harrisone374d482008-01-31 15:20:50 -08004328 len += sprintf(buf + len, "%7ld ", l->count);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004329
4330 if (l->addr)
Joe Perches62c70bc2011-01-13 15:45:52 -08004331 len += sprintf(buf + len, "%pS", (void *)l->addr);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004332 else
Harvey Harrisone374d482008-01-31 15:20:50 -08004333 len += sprintf(buf + len, "<not-available>");
Christoph Lameter45edfa52007-05-09 02:32:45 -07004334
4335 if (l->sum_time != l->min_time) {
Harvey Harrisone374d482008-01-31 15:20:50 -08004336 len += sprintf(buf + len, " age=%ld/%ld/%ld",
Roman Zippelf8bd2252008-05-01 04:34:31 -07004337 l->min_time,
4338 (long)div_u64(l->sum_time, l->count),
4339 l->max_time);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004340 } else
Harvey Harrisone374d482008-01-31 15:20:50 -08004341 len += sprintf(buf + len, " age=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004342 l->min_time);
4343
4344 if (l->min_pid != l->max_pid)
Harvey Harrisone374d482008-01-31 15:20:50 -08004345 len += sprintf(buf + len, " pid=%ld-%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004346 l->min_pid, l->max_pid);
4347 else
Harvey Harrisone374d482008-01-31 15:20:50 -08004348 len += sprintf(buf + len, " pid=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004349 l->min_pid);
4350
Rusty Russell174596a2009-01-01 10:12:29 +10304351 if (num_online_cpus() > 1 &&
4352 !cpumask_empty(to_cpumask(l->cpus)) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08004353 len < PAGE_SIZE - 60) {
4354 len += sprintf(buf + len, " cpus=");
4355 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Rusty Russell174596a2009-01-01 10:12:29 +10304356 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004357 }
4358
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004359 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08004360 len < PAGE_SIZE - 60) {
4361 len += sprintf(buf + len, " nodes=");
4362 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Christoph Lameter45edfa52007-05-09 02:32:45 -07004363 l->nodes);
4364 }
4365
Harvey Harrisone374d482008-01-31 15:20:50 -08004366 len += sprintf(buf + len, "\n");
Christoph Lameter88a420e2007-05-06 14:49:45 -07004367 }
4368
4369 free_loc_track(&t);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004370 kfree(map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004371 if (!t.count)
Harvey Harrisone374d482008-01-31 15:20:50 -08004372 len += sprintf(buf, "No data\n");
4373 return len;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004374}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004375#endif
Christoph Lameter88a420e2007-05-06 14:49:45 -07004376
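/*
 * Optional self test: deliberately corrupt objects of several kmalloc
 * caches and run the validator to check that the corruption is detected.
 */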
Christoph Lametera5a84752010-10-05 13:57:27 -05004377#ifdef SLUB_RESILIENCY_TEST
4378static void resiliency_test(void)
4379{
4380 u8 *p;
4381
4382 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4383
4384 printk(KERN_ERR "SLUB resiliency testing\n");
4385 printk(KERN_ERR "-----------------------\n");
4386 printk(KERN_ERR "A. Corruption after allocation\n");
4387
4388 p = kzalloc(16, GFP_KERNEL);
4389 p[16] = 0x12;
4390 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4391 " 0x12->0x%p\n\n", p + 16);
4392
4393 validate_slab_cache(kmalloc_caches[4]);
4394
4395 /* Hmmm... The next two are dangerous */
4396 p = kzalloc(32, GFP_KERNEL);
4397 p[32 + sizeof(void *)] = 0x34;
4398 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4399 " 0x34 -> -0x%p\n", p);
4400 printk(KERN_ERR
4401 "If allocated object is overwritten then not detectable\n\n");
4402
4403 validate_slab_cache(kmalloc_caches[5]);
4404 p = kzalloc(64, GFP_KERNEL);
4405 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4406 *p = 0x56;
4407 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4408 p);
4409 printk(KERN_ERR
4410 "If allocated object is overwritten then not detectable\n\n");
4411 validate_slab_cache(kmalloc_caches[6]);
4412
4413 printk(KERN_ERR "\nB. Corruption after free\n");
4414 p = kzalloc(128, GFP_KERNEL);
4415 kfree(p);
4416 *p = 0x78;
4417 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4418 validate_slab_cache(kmalloc_caches[7]);
4419
4420 p = kzalloc(256, GFP_KERNEL);
4421 kfree(p);
4422 p[50] = 0x9a;
4423 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4424 p);
4425 validate_slab_cache(kmalloc_caches[8]);
4426
4427 p = kzalloc(512, GFP_KERNEL);
4428 kfree(p);
4429 p[512] = 0xab;
4430 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4431 validate_slab_cache(kmalloc_caches[9]);
4432}
4433#else
4434#ifdef CONFIG_SYSFS
4435static void resiliency_test(void) {};
4436#endif
4437#endif
4438
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004439#ifdef CONFIG_SYSFS
Christoph Lameter81819f02007-05-06 14:49:36 -07004440enum slab_stat_type {
Christoph Lameter205ab992008-04-14 19:11:40 +03004441 SL_ALL, /* All slabs */
4442 SL_PARTIAL, /* Only partially allocated slabs */
4443 SL_CPU, /* Only slabs used for cpu caches */
4444 SL_OBJECTS, /* Determine allocated objects not slabs */
4445 SL_TOTAL /* Determine object capacity not slabs */
Christoph Lameter81819f02007-05-06 14:49:36 -07004446};
4447
Christoph Lameter205ab992008-04-14 19:11:40 +03004448#define SO_ALL (1 << SL_ALL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004449#define SO_PARTIAL (1 << SL_PARTIAL)
4450#define SO_CPU (1 << SL_CPU)
4451#define SO_OBJECTS (1 << SL_OBJECTS)
Christoph Lameter205ab992008-04-14 19:11:40 +03004452#define SO_TOTAL (1 << SL_TOTAL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004453
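/*
 * Back end for the object/slab counting sysfs attributes. The SO_* flags
 * select which slabs are scanned (cpu slabs, partial lists or all slabs)
 * and whether slabs, allocated objects or total object capacity are
 * counted; a per node breakdown is appended on NUMA.
 */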
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004454static ssize_t show_slab_objects(struct kmem_cache *s,
4455 char *buf, unsigned long flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07004456{
4457 unsigned long total = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07004458 int node;
4459 int x;
4460 unsigned long *nodes;
4461 unsigned long *per_cpu;
4462
4463 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004464 if (!nodes)
4465 return -ENOMEM;
Christoph Lameter81819f02007-05-06 14:49:36 -07004466 per_cpu = nodes + nr_node_ids;
4467
Christoph Lameter205ab992008-04-14 19:11:40 +03004468 if (flags & SO_CPU) {
4469 int cpu;
Christoph Lameter81819f02007-05-06 14:49:36 -07004470
Christoph Lameter205ab992008-04-14 19:11:40 +03004471 for_each_possible_cpu(cpu) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06004472 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameterec3ab082012-05-09 10:09:56 -05004473 int node;
Christoph Lameter49e22582011-08-09 16:12:27 -05004474 struct page *page;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004475
Eric Dumazetbc6697d2011-11-22 16:02:02 +01004476 page = ACCESS_ONCE(c->page);
Christoph Lameterec3ab082012-05-09 10:09:56 -05004477 if (!page)
4478 continue;
Christoph Lameter205ab992008-04-14 19:11:40 +03004479
Christoph Lameterec3ab082012-05-09 10:09:56 -05004480 node = page_to_nid(page);
4481 if (flags & SO_TOTAL)
4482 x = page->objects;
4483 else if (flags & SO_OBJECTS)
4484 x = page->inuse;
4485 else
4486 x = 1;
Christoph Lameter49e22582011-08-09 16:12:27 -05004487
Christoph Lameterec3ab082012-05-09 10:09:56 -05004488 total += x;
4489 nodes[node] += x;
4490
4491 page = ACCESS_ONCE(c->partial);
Christoph Lameter49e22582011-08-09 16:12:27 -05004492 if (page) {
4493 x = page->pobjects;
Eric Dumazetbc6697d2011-11-22 16:02:02 +01004494 total += x;
4495 nodes[node] += x;
Christoph Lameter49e22582011-08-09 16:12:27 -05004496 }
Christoph Lameterec3ab082012-05-09 10:09:56 -05004497
Eric Dumazetbc6697d2011-11-22 16:02:02 +01004498 per_cpu[node]++;
Christoph Lameter81819f02007-05-06 14:49:36 -07004499 }
4500 }
4501
Christoph Lameter04d94872011-01-10 10:15:15 -06004502 lock_memory_hotplug();
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004503#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter205ab992008-04-14 19:11:40 +03004504 if (flags & SO_ALL) {
4505 for_each_node_state(node, N_NORMAL_MEMORY) {
4506 struct kmem_cache_node *n = get_node(s, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07004507
Christoph Lameter205ab992008-04-14 19:11:40 +03004508 if (flags & SO_TOTAL)
4509 x = atomic_long_read(&n->total_objects);
4510 else if (flags & SO_OBJECTS)
4511 x = atomic_long_read(&n->total_objects) -
4512 count_partial(n, count_free);
4513
4514 else
4515 x = atomic_long_read(&n->nr_slabs);
4516 total += x;
4517 nodes[node] += x;
4518 }
4519
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004520 } else
4521#endif
4522 if (flags & SO_PARTIAL) {
Christoph Lameter205ab992008-04-14 19:11:40 +03004523 for_each_node_state(node, N_NORMAL_MEMORY) {
4524 struct kmem_cache_node *n = get_node(s, node);
4525
4526 if (flags & SO_TOTAL)
4527 x = count_partial(n, count_total);
4528 else if (flags & SO_OBJECTS)
4529 x = count_partial(n, count_inuse);
Christoph Lameter81819f02007-05-06 14:49:36 -07004530 else
4531 x = n->nr_partial;
4532 total += x;
4533 nodes[node] += x;
4534 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004535 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004536 x = sprintf(buf, "%lu", total);
4537#ifdef CONFIG_NUMA
Christoph Lameterf64dc582007-10-16 01:25:33 -07004538 for_each_node_state(node, N_NORMAL_MEMORY)
Christoph Lameter81819f02007-05-06 14:49:36 -07004539 if (nodes[node])
4540 x += sprintf(buf + x, " N%d=%lu",
4541 node, nodes[node]);
4542#endif
Christoph Lameter04d94872011-01-10 10:15:15 -06004543 unlock_memory_hotplug();
Christoph Lameter81819f02007-05-06 14:49:36 -07004544 kfree(nodes);
4545 return x + sprintf(buf + x, "\n");
4546}
4547
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004548#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07004549static int any_slab_objects(struct kmem_cache *s)
4550{
4551 int node;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004552
4553 for_each_online_node(node) {
Christoph Lameter81819f02007-05-06 14:49:36 -07004554 struct kmem_cache_node *n = get_node(s, node);
4555
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004556 if (!n)
4557 continue;
4558
Benjamin Herrenschmidt4ea33e22008-05-06 20:42:39 -07004559 if (atomic_long_read(&n->total_objects))
Christoph Lameter81819f02007-05-06 14:49:36 -07004560 return 1;
4561 }
4562 return 0;
4563}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004564#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004565
4566#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
Phil Carmody497888c2011-07-14 15:07:13 +03004567#define to_slab(n) container_of(n, struct kmem_cache, kobj)
Christoph Lameter81819f02007-05-06 14:49:36 -07004568
4569struct slab_attribute {
4570 struct attribute attr;
4571 ssize_t (*show)(struct kmem_cache *s, char *buf);
4572 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4573};
4574
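/*
 * Attribute definition helpers for /sys/kernel/slab/<cache>/. For example,
 * SLAB_ATTR_RO(slab_size) creates a read-only (0400) file "slab_size"
 * backed by slab_size_show(), while SLAB_ATTR(order) also wires up
 * order_store() for writes (0600).
 */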
4575#define SLAB_ATTR_RO(_name) \
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04004576 static struct slab_attribute _name##_attr = \
4577 __ATTR(_name, 0400, _name##_show, NULL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004578
4579#define SLAB_ATTR(_name) \
4580 static struct slab_attribute _name##_attr = \
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04004581 __ATTR(_name, 0600, _name##_show, _name##_store)
Christoph Lameter81819f02007-05-06 14:49:36 -07004582
Christoph Lameter81819f02007-05-06 14:49:36 -07004583static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4584{
4585 return sprintf(buf, "%d\n", s->size);
4586}
4587SLAB_ATTR_RO(slab_size);
4588
4589static ssize_t align_show(struct kmem_cache *s, char *buf)
4590{
4591 return sprintf(buf, "%d\n", s->align);
4592}
4593SLAB_ATTR_RO(align);
4594
4595static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4596{
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05004597 return sprintf(buf, "%d\n", s->object_size);
Christoph Lameter81819f02007-05-06 14:49:36 -07004598}
4599SLAB_ATTR_RO(object_size);
4600
4601static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4602{
Christoph Lameter834f3d12008-04-14 19:11:31 +03004603 return sprintf(buf, "%d\n", oo_objects(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07004604}
4605SLAB_ATTR_RO(objs_per_slab);
4606
Christoph Lameter06b285d2008-04-14 19:11:41 +03004607static ssize_t order_store(struct kmem_cache *s,
4608 const char *buf, size_t length)
4609{
Christoph Lameter0121c6192008-04-29 16:11:12 -07004610 unsigned long order;
4611 int err;
4612
4613 err = strict_strtoul(buf, 10, &order);
4614 if (err)
4615 return err;
Christoph Lameter06b285d2008-04-14 19:11:41 +03004616
4617 if (order > slub_max_order || order < slub_min_order)
4618 return -EINVAL;
4619
4620 calculate_sizes(s, order);
4621 return length;
4622}
4623
Christoph Lameter81819f02007-05-06 14:49:36 -07004624static ssize_t order_show(struct kmem_cache *s, char *buf)
4625{
Christoph Lameter834f3d12008-04-14 19:11:31 +03004626 return sprintf(buf, "%d\n", oo_order(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07004627}
Christoph Lameter06b285d2008-04-14 19:11:41 +03004628SLAB_ATTR(order);
Christoph Lameter81819f02007-05-06 14:49:36 -07004629
David Rientjes73d342b2009-02-22 17:40:09 -08004630static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4631{
4632 return sprintf(buf, "%lu\n", s->min_partial);
4633}
4634
4635static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4636 size_t length)
4637{
4638 unsigned long min;
4639 int err;
4640
4641 err = strict_strtoul(buf, 10, &min);
4642 if (err)
4643 return err;
4644
David Rientjesc0bdb232009-02-25 09:16:35 +02004645 set_min_partial(s, min);
David Rientjes73d342b2009-02-22 17:40:09 -08004646 return length;
4647}
4648SLAB_ATTR(min_partial);
4649
Christoph Lameter49e22582011-08-09 16:12:27 -05004650static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4651{
4652 return sprintf(buf, "%u\n", s->cpu_partial);
4653}
4654
4655static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4656 size_t length)
4657{
4658 unsigned long objects;
4659 int err;
4660
4661 err = strict_strtoul(buf, 10, &objects);
4662 if (err)
4663 return err;
David Rientjes74ee4ef2012-01-09 13:19:45 -08004664 if (objects && kmem_cache_debug(s))
4665 return -EINVAL;
Christoph Lameter49e22582011-08-09 16:12:27 -05004666
4667 s->cpu_partial = objects;
4668 flush_all(s);
4669 return length;
4670}
4671SLAB_ATTR(cpu_partial);
4672
Christoph Lameter81819f02007-05-06 14:49:36 -07004673static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4674{
Joe Perches62c70bc2011-01-13 15:45:52 -08004675 if (!s->ctor)
4676 return 0;
4677 return sprintf(buf, "%pS\n", s->ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07004678}
4679SLAB_ATTR_RO(ctor);
4680
Christoph Lameter81819f02007-05-06 14:49:36 -07004681static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4682{
4683 return sprintf(buf, "%d\n", s->refcount - 1);
4684}
4685SLAB_ATTR_RO(aliases);
4686
Christoph Lameter81819f02007-05-06 14:49:36 -07004687static ssize_t partial_show(struct kmem_cache *s, char *buf)
4688{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08004689 return show_slab_objects(s, buf, SO_PARTIAL);
Christoph Lameter81819f02007-05-06 14:49:36 -07004690}
4691SLAB_ATTR_RO(partial);
4692
4693static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4694{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08004695 return show_slab_objects(s, buf, SO_CPU);
Christoph Lameter81819f02007-05-06 14:49:36 -07004696}
4697SLAB_ATTR_RO(cpu_slabs);
4698
4699static ssize_t objects_show(struct kmem_cache *s, char *buf)
4700{
Christoph Lameter205ab992008-04-14 19:11:40 +03004701 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
Christoph Lameter81819f02007-05-06 14:49:36 -07004702}
4703SLAB_ATTR_RO(objects);
4704
Christoph Lameter205ab992008-04-14 19:11:40 +03004705static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4706{
4707 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4708}
4709SLAB_ATTR_RO(objects_partial);
4710
Christoph Lameter49e22582011-08-09 16:12:27 -05004711static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4712{
4713 int objects = 0;
4714 int pages = 0;
4715 int cpu;
4716 int len;
4717
4718 for_each_online_cpu(cpu) {
4719 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4720
4721 if (page) {
4722 pages += page->pages;
4723 objects += page->pobjects;
4724 }
4725 }
4726
4727 len = sprintf(buf, "%d(%d)", objects, pages);
4728
4729#ifdef CONFIG_SMP
4730 for_each_online_cpu(cpu) {
 4731		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4732
4733 if (page && len < PAGE_SIZE - 20)
4734 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4735 page->pobjects, page->pages);
4736 }
4737#endif
4738 return len + sprintf(buf + len, "\n");
4739}
4740SLAB_ATTR_RO(slabs_cpu_partial);
4741
Christoph Lameter81819f02007-05-06 14:49:36 -07004742static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4743{
4744 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4745}
4746
4747static ssize_t reclaim_account_store(struct kmem_cache *s,
4748 const char *buf, size_t length)
4749{
4750 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4751 if (buf[0] == '1')
4752 s->flags |= SLAB_RECLAIM_ACCOUNT;
4753 return length;
4754}
4755SLAB_ATTR(reclaim_account);
4756
4757static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4758{
Christoph Lameter5af60832007-05-06 14:49:56 -07004759 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
Christoph Lameter81819f02007-05-06 14:49:36 -07004760}
4761SLAB_ATTR_RO(hwcache_align);
4762
4763#ifdef CONFIG_ZONE_DMA
4764static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4765{
4766 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4767}
4768SLAB_ATTR_RO(cache_dma);
4769#endif
4770
4771static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4772{
4773 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4774}
4775SLAB_ATTR_RO(destroy_by_rcu);
4776
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08004777static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4778{
4779 return sprintf(buf, "%d\n", s->reserved);
4780}
4781SLAB_ATTR_RO(reserved);
4782
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004783#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05004784static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4785{
4786 return show_slab_objects(s, buf, SO_ALL);
4787}
4788SLAB_ATTR_RO(slabs);
4789
4790static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4791{
4792 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4793}
4794SLAB_ATTR_RO(total_objects);
4795
4796static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4797{
4798 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4799}
4800
4801static ssize_t sanity_checks_store(struct kmem_cache *s,
4802 const char *buf, size_t length)
4803{
4804 s->flags &= ~SLAB_DEBUG_FREE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004805 if (buf[0] == '1') {
4806 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lametera5a84752010-10-05 13:57:27 -05004807 s->flags |= SLAB_DEBUG_FREE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004808 }
Christoph Lametera5a84752010-10-05 13:57:27 -05004809 return length;
4810}
4811SLAB_ATTR(sanity_checks);
4812
4813static ssize_t trace_show(struct kmem_cache *s, char *buf)
4814{
4815 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4816}
4817
4818static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4819 size_t length)
4820{
4821 s->flags &= ~SLAB_TRACE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004822 if (buf[0] == '1') {
4823 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lametera5a84752010-10-05 13:57:27 -05004824 s->flags |= SLAB_TRACE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004825 }
Christoph Lametera5a84752010-10-05 13:57:27 -05004826 return length;
4827}
4828SLAB_ATTR(trace);
4829
Christoph Lameter81819f02007-05-06 14:49:36 -07004830static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4831{
4832 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4833}
4834
4835static ssize_t red_zone_store(struct kmem_cache *s,
4836 const char *buf, size_t length)
4837{
4838 if (any_slab_objects(s))
4839 return -EBUSY;
4840
4841 s->flags &= ~SLAB_RED_ZONE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004842 if (buf[0] == '1') {
4843 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004844 s->flags |= SLAB_RED_ZONE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004845 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004846 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004847 return length;
4848}
4849SLAB_ATTR(red_zone);
4850
4851static ssize_t poison_show(struct kmem_cache *s, char *buf)
4852{
4853 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4854}
4855
4856static ssize_t poison_store(struct kmem_cache *s,
4857 const char *buf, size_t length)
4858{
4859 if (any_slab_objects(s))
4860 return -EBUSY;
4861
4862 s->flags &= ~SLAB_POISON;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004863 if (buf[0] == '1') {
4864 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004865 s->flags |= SLAB_POISON;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004866 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004867 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004868 return length;
4869}
4870SLAB_ATTR(poison);
4871
4872static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4873{
4874 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4875}
4876
4877static ssize_t store_user_store(struct kmem_cache *s,
4878 const char *buf, size_t length)
4879{
4880 if (any_slab_objects(s))
4881 return -EBUSY;
4882
4883 s->flags &= ~SLAB_STORE_USER;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004884 if (buf[0] == '1') {
4885 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004886 s->flags |= SLAB_STORE_USER;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004887 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004888 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004889 return length;
4890}
4891SLAB_ATTR(store_user);
4892
Christoph Lameter53e15af2007-05-06 14:49:43 -07004893static ssize_t validate_show(struct kmem_cache *s, char *buf)
4894{
4895 return 0;
4896}
4897
4898static ssize_t validate_store(struct kmem_cache *s,
4899 const char *buf, size_t length)
4900{
Christoph Lameter434e2452007-07-17 04:03:30 -07004901 int ret = -EINVAL;
4902
4903 if (buf[0] == '1') {
4904 ret = validate_slab_cache(s);
4905 if (ret >= 0)
4906 ret = length;
4907 }
4908 return ret;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004909}
4910SLAB_ATTR(validate);
Christoph Lametera5a84752010-10-05 13:57:27 -05004911
4912static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4913{
4914 if (!(s->flags & SLAB_STORE_USER))
4915 return -ENOSYS;
4916 return list_locations(s, buf, TRACK_ALLOC);
4917}
4918SLAB_ATTR_RO(alloc_calls);
4919
4920static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4921{
4922 if (!(s->flags & SLAB_STORE_USER))
4923 return -ENOSYS;
4924 return list_locations(s, buf, TRACK_FREE);
4925}
4926SLAB_ATTR_RO(free_calls);
4927#endif /* CONFIG_SLUB_DEBUG */
4928
4929#ifdef CONFIG_FAILSLAB
4930static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4931{
4932 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4933}
4934
4935static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4936 size_t length)
4937{
4938 s->flags &= ~SLAB_FAILSLAB;
4939 if (buf[0] == '1')
4940 s->flags |= SLAB_FAILSLAB;
4941 return length;
4942}
4943SLAB_ATTR(failslab);
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004944#endif
Christoph Lameter53e15af2007-05-06 14:49:43 -07004945
Christoph Lameter2086d262007-05-06 14:49:46 -07004946static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4947{
4948 return 0;
4949}
4950
4951static ssize_t shrink_store(struct kmem_cache *s,
4952 const char *buf, size_t length)
4953{
4954 if (buf[0] == '1') {
4955 int rc = kmem_cache_shrink(s);
4956
4957 if (rc)
4958 return rc;
4959 } else
4960 return -EINVAL;
4961 return length;
4962}
4963SLAB_ATTR(shrink);
4964
Christoph Lameter81819f02007-05-06 14:49:36 -07004965#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08004966static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
Christoph Lameter81819f02007-05-06 14:49:36 -07004967{
Christoph Lameter98246012008-01-07 23:20:26 -08004968 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
Christoph Lameter81819f02007-05-06 14:49:36 -07004969}
4970
Christoph Lameter98246012008-01-07 23:20:26 -08004971static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07004972 const char *buf, size_t length)
4973{
Christoph Lameter0121c6192008-04-29 16:11:12 -07004974 unsigned long ratio;
4975 int err;
Christoph Lameter81819f02007-05-06 14:49:36 -07004976
Christoph Lameter0121c6192008-04-29 16:11:12 -07004977 err = strict_strtoul(buf, 10, &ratio);
4978 if (err)
4979 return err;
4980
Christoph Lametere2cb96b2008-08-19 08:51:22 -05004981 if (ratio <= 100)
Christoph Lameter0121c6192008-04-29 16:11:12 -07004982 s->remote_node_defrag_ratio = ratio * 10;
4983
Christoph Lameter81819f02007-05-06 14:49:36 -07004984 return length;
4985}
Christoph Lameter98246012008-01-07 23:20:26 -08004986SLAB_ATTR(remote_node_defrag_ratio);
#endif

#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
	}
#endif
	kfree(data);
	return len + sprintf(buf + len, "\n");
}
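/*
 * Output sketch (hypothetical numbers): a read of one of the statistics
 * files first prints the sum over all online cpus and then, on SMP, one
 * " C<cpu>=<count>" entry per cpu with a non-zero count, e.g.
 *	4532 C0=4100 C1=432
 */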

static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}

#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text);						\

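/*
 * Expansion sketch: STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) below emits an
 * alloc_fastpath_show() wrapping show_stat(s, buf, ALLOC_FASTPATH), an
 * alloc_fastpath_store() that only accepts input beginning with '0' and
 * resets the counter via clear_stat(), and the SLAB_ATTR(alloc_fastpath)
 * attribute definition.
 */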
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif

static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&reserved_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif

	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s->name);
	kfree(s);
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static const struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (!(s->flags & SLAB_NOTRACK))
		*p++ = 't';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
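/*
 * Worked example (illustrative): for a mergeable 192 byte cache with no
 * special flags and kmemcheck tracking left on (SLAB_NOTRACK clear), the
 * checks above produce ":t-0000192"; adding SLAB_CACHE_DMA would instead
 * give ":dt-0000192".
 */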

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < FULL)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err) {
		kobject_del(&s->kobj);
		kobject_put(&s->kobj);
		return err;
	}
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}
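/*
 * Net effect (descriptive sketch): a mergeable cache ends up as a directory
 * such as /sys/kernel/slab/:t-0000192 holding the attribute files, and
 * sysfs_slab_alias() adds a symlink under the cache's own name pointing at
 * it; unmergeable (e.g. debug) caches use their proper name directly.
 */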

static void sysfs_slab_remove(struct kmem_cache *s)
{
	if (slab_state < FULL)
		/*
		 * Sysfs has not been setup yet so no need to remove the
		 * cache from sysfs.
		 */
		return;

	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == FULL) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}
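/*
 * Note (descriptive): before sysfs is up (slab_state below FULL) the alias
 * is only queued on alias_list; slab_sysfs_init() below replays the list
 * once the "slab" kset exists, so caches created during early boot still
 * get their symlinks.
 */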

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
		"<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}
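/*
 * Output sketch (hypothetical numbers): one /proc/slabinfo line produced
 * here looks roughly like
 *	kmalloc-64    12800 12800 64 64 1 : tunables 0 0 0 : slabdata 200 200 0
 * The tunables are always reported as zero because SLUB does not use the
 * SLAB-style per-cpu queues they describe.
 */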

static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */