/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *     A. page->freelist -> List of free objects in a page
 *     B. page->counters -> Counters of objects
 *     C. page->frozen   -> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor can the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive       The slab is frozen and exempt from list processing.
 *                  This means that the slab is dedicated to a purpose
 *                  such as satisfying allocations for a specific
 *                  processor. Objects may be freed in the slab while
 *                  it is frozen but slab_free will then skip the usual
 *                  list operations. It is up to the processor holding
 *                  the slab to integrate the slab into the slab lists
 *                  when the slab is no longer needed.
 *
 *                  One use of this flag is to mark slabs that are
 *                  used for allocations. Then such a slab becomes a cpu
 *                  slab. The cpu slab may be equipped with an additional
 *                  freelist that allows lockless access to
 *                  free objects in addition to the regular freelist
 *                  that requires the slab lock.
 *
 * PageError        Slab requires special handling due to debug
 *                  options set. This moves slab handling out of
 *                  the fast path and disables lockless freelists.
 */

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
        return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA | SLAB_NOTRACK)

#define OO_SHIFT        16
#define OO_MASK         ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE       32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON         0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
        unsigned long addr;     /* Called from address */
#ifdef CONFIG_STACKTRACE
        unsigned long addrs[TRACK_ADDRS_COUNT];  /* Called from address */
#endif
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
        unsigned long when;     /* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                        { return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
        kfree(s->name);
        kfree(s);
}

#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        __this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 *                      Core slab cache functions
 *******************************************************************/

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, const void *object)
{
        void *base;

        if (!object)
                return 1;

        base = page_address(page);
        if (object < base || object >= base + page->objects * s->size ||
                (object - base) % s->size) {
                return 0;
        }

        return 1;
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
        return *(void **)(object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
        prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
        void *p;

#ifdef CONFIG_DEBUG_PAGEALLOC
        probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
#else
        p = get_freepointer(s, object);
#endif
        return p;
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
        *(void **)(object + s->offset) = fp;
}

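/*
 * Illustrative sketch, not part of the original SLUB code: free objects
 * are chained through the word at object + s->offset, so walking a
 * page's freelist with get_freepointer() looks like the helper below.
 * The function name and the nr_free counter are hypothetical and exist
 * only for this example.
 */
static inline unsigned long count_free_objects_example(struct kmem_cache *s,
                                                        struct page *page)
{
        unsigned long nr_free = 0;
        void *p;

        /* Follow the per-object free pointers until the chain ends. */
        for (p = page->freelist; p; p = get_freepointer(s, p))
                nr_free++;

        return nr_free;
}
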
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
        for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
                        __p += (__s)->size)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
        return (p - addr) / s->size;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;

#endif
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
}

static inline int order_objects(int order, unsigned long size, int reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
                unsigned long size, int reserved)
{
        struct kmem_cache_order_objects x = {
                (order << OO_SHIFT) + order_objects(order, size, reserved)
        };

        return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;
}

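/*
 * Illustrative sketch, not part of the original SLUB code: a
 * kmem_cache_order_objects value packs the page order above OO_SHIFT and
 * the object count in the low OO_SHIFT bits. The order and object size
 * below are hypothetical; the checks only demonstrate the encode/decode
 * round trip of oo_make()/oo_order()/oo_objects().
 */
static inline void oo_pack_example(void)
{
        struct kmem_cache_order_objects oo = oo_make(1, 96, 0);

        /* Decoding must return exactly what was encoded. */
        BUG_ON(oo_order(oo) != 1);
        BUG_ON(oo_objects(oo) != order_objects(1, 96, 0));
}
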
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
        bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
        __bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
        VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist, &page->counters,
                        freelist_old, counters_old,
                        freelist_new, counters_new))
                        return 1;
        } else
#endif
        {
                slab_lock(page);
                if (page->freelist == freelist_old && page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
                        return 1;
                }
                slab_unlock(page);
        }

        cpu_relax();
        stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
        printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif

        return 0;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist, &page->counters,
                        freelist_old, counters_old,
                        freelist_new, counters_new))
                        return 1;
        } else
#endif
        {
                unsigned long flags;

                local_irq_save(flags);
                slab_lock(page);
                if (page->freelist == freelist_old && page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
                        local_irq_restore(flags);
                        return 1;
                }
                slab_unlock(page);
                local_irq_restore(flags);
        }

        cpu_relax();
        stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
        printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif

        return 0;
}

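/*
 * Illustrative sketch of how callers later in this file use the two
 * helpers above: read the current freelist/counters pair, construct the
 * new pair and retry until the atomic (or lock protected) update
 * succeeds, e.g.
 *
 *      do {
 *              prior = page->freelist;
 *              counters = page->counters;
 *              ... compute new_freelist / new_counters ...
 *      } while (!cmpxchg_double_slab(s, page,
 *                      prior, counters,
 *                      new_freelist, new_counters,
 *                      "example"));
 *
 * "prior", "counters" and the new_* variables are placeholders for this
 * sketch; see the real callers (such as __slab_free) for the exact code.
 */
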
#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * The node's list_lock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
        void *p;
        void *addr = page_address(page);

        for (p = page->freelist; p; p = get_freepointer(s, p))
                set_bit(slab_index(p, s, addr), map);
}

/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
        print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
}

static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
                        enum track_item alloc, unsigned long addr)
{
        struct track *p = get_track(s, object, alloc);

        if (addr) {
#ifdef CONFIG_STACKTRACE
                struct stack_trace trace;
                int i;

                trace.nr_entries = 0;
                trace.max_entries = TRACK_ADDRS_COUNT;
                trace.entries = p->addrs;
                trace.skip = 3;
                save_stack_trace(&trace);

                /* See rant in lockdep.c */
                if (trace.nr_entries != 0 &&
                    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
                        trace.nr_entries--;

                for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
                        p->addrs[i] = 0;
#endif
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current->pid;
                p->when = jiffies;
        } else
                memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        set_track(s, object, TRACK_FREE, 0UL);
        set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
        if (!t->addr)
                return;

        printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
                s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
        {
                int i;
                for (i = 0; i < TRACK_ADDRS_COUNT; i++)
                        if (t->addrs[i])
                                printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
                        else
                                break;
        }
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        print_track("Allocated", get_track(s, object, TRACK_ALLOC));
        print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
        printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
                page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "========================================"
                        "=====================================\n");
        printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
        printk(KERN_ERR "----------------------------------------"
                        "-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned int off;       /* Offset of last byte */
        u8 *addr = page_address(page);

        print_tracking(s, p);

        print_page_info(page);

        printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
                        p, p - addr, get_freepointer(s, p));

        if (p > addr + 16)
                print_section("Bytes b4 ", p - 16, 16);

        print_section("Object ", p, min_t(unsigned long, s->object_size,
                                PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
                print_section("Redzone ", p + s->object_size,
                        s->inuse - s->object_size);

        if (s->offset)
                off = s->offset + sizeof(void *);
        else
                off = s->inuse;

        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);

        if (off != s->size)
                /* Beginning of the filler is the free pointer */
                print_section("Padding ", p + off, s->size - off);

        dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
                        u8 *object, char *reason)
{
        slab_bug(s, "%s", reason);
        print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        slab_bug(s, "%s", buf);
        print_page_info(page);
        dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
        u8 *p = object;

        if (s->flags & __OBJECT_POISON) {
                memset(p, POISON_FREE, s->object_size - 1);
                p[s->object_size - 1] = POISON_END;
        }

        if (s->flags & SLAB_RED_ZONE)
                memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
{
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
                        u8 *object, char *what,
                        u8 *start, unsigned int value, unsigned int bytes)
{
        u8 *fault;
        u8 *end;

        fault = memchr_inv(start, value, bytes);
        if (!fault)
                return 1;

        end = start + bytes;
        while (end > fault && end[-1] == value)
                end--;

        slab_bug(s, "%s overwritten", what);
        printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
                                        fault, end - 1, fault[0], value);
        print_trailer(s, page, object);

        restore_bytes(s, what, value, fault, end);
        return 0;
}

/*
 * Object layout:
 *
 * object address
 *      Bytes of the object to be managed.
 *      If the freepointer may overlay the object then the free
 *      pointer is the first word of the object.
 *
 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *      0xa5 (POISON_END)
 *
 * object + s->object_size
 *      Padding to reach word boundary. This is also used for Redzoning.
 *      Padding is extended by another word if Redzoning is enabled and
 *      object_size == inuse.
 *
 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *      0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *      Meta data starts here.
 *
 *      A. Free pointer (if we cannot overwrite object on free)
 *      B. Tracking data for SLAB_STORE_USER
 *      C. Padding to reach required alignment boundary or at minimum
 *              one word if debugging is on to be able to detect writes
 *              before the word boundary.
 *
 *      Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *      Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

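/*
 * Worked example of the layout above (hypothetical numbers, for
 * illustration only): with object_size 20, SLAB_POISON and
 * SLAB_STORE_USER on a 64 bit machine the object could be laid out as
 *
 *      object + 0      20 bytes of poisoned payload
 *      object + 20     padding up to the word boundary, so inuse = 24
 *      object + 24     free pointer (the object may not be overwritten
 *                      on free because of poisoning)
 *      object + 32     2 * sizeof(struct track) of alloc/free tracking
 *      object + size   start of the next object
 *
 * The exact offsets depend on alignment and the selected flags; see
 * calculate_sizes() for the authoritative computation.
 */
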
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned long off = s->inuse;   /* The end of info */

        if (s->offset)
                /* Freepointer is placed after the object. */
                off += sizeof(void *);

        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
                off += 2 * sizeof(struct track);

        if (s->size == off)
                return 1;

        return check_bytes_and_report(s, page, p, "Object padding",
                                p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
        u8 *start;
        u8 *fault;
        u8 *end;
        int length;
        int remainder;

        if (!(s->flags & SLAB_POISON))
                return 1;

        start = page_address(page);
        length = (PAGE_SIZE << compound_order(page)) - s->reserved;
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
                return 1;

        fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;

        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
        print_section("Padding ", end - remainder, remainder);

        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
                                        void *object, u8 val)
{
        u8 *p = object;
        u8 *endobject = object + s->object_size;

        if (s->flags & SLAB_RED_ZONE) {
                if (!check_bytes_and_report(s, page, object, "Redzone",
                        endobject, val, s->inuse - s->object_size))
                        return 0;
        } else {
                if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
                                endobject, POISON_INUSE, s->inuse - s->object_size);
                }
        }

        if (s->flags & SLAB_POISON) {
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->object_size - 1) ||
                         !check_bytes_and_report(s, page, p, "Poison",
                                p + s->object_size - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
                 */
                check_pad_bytes(s, page, p);
        }

        if (!s->offset && val == SLUB_RED_ACTIVE)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
                 */
                return 1;

        /* Check free pointer validity */
        if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                object_err(s, page, p, "Freepointer corrupt");
                /*
                 * No choice but to zap it and thus lose the remainder
                 * of the free objects in this slab. May cause
                 * another error because the object count is now wrong.
                 */
                set_freepointer(s, p, NULL);
                return 0;
        }
        return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
        int maxobj;

        VM_BUG_ON(!irqs_disabled());

        if (!PageSlab(page)) {
                slab_err(s, page, "Not a valid slab page");
                return 0;
        }

        maxobj = order_objects(compound_order(page), s->size, s->reserved);
        if (page->objects > maxobj) {
                slab_err(s, page, "objects %u > max %u",
                        s->name, page->objects, maxobj);
                return 0;
        }
        if (page->inuse > page->objects) {
                slab_err(s, page, "inuse %u > max %u",
                        s->name, page->inuse, page->objects);
                return 0;
        }
        /* Slab_pad_check fixes things up after itself */
        slab_pad_check(s, page);
        return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
        int nr = 0;
        void *fp;
        void *object = NULL;
        unsigned long max_objects;

        fp = page->freelist;
        while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        if (object) {
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
                                break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
                                page->inuse = page->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
                        }
                        break;
                }
                object = fp;
                fp = get_freepointer(s, object);
                nr++;
        }

        max_objects = order_objects(compound_order(page), s->size, s->reserved);
        if (max_objects > MAX_OBJS_PER_PAGE)
                max_objects = MAX_OBJS_PER_PAGE;

        if (page->objects != max_objects) {
                slab_err(s, page, "Wrong number of objects. Found %d but "
                        "should be %d", page->objects, max_objects);
                page->objects = max_objects;
                slab_fix(s, "Number of objects adjusted.");
        }
        if (page->inuse != page->objects - nr) {
                slab_err(s, page, "Wrong object count. Counter is %d but "
                        "counted were %d", page->inuse, page->objects - nr);
                page->inuse = page->objects - nr;
                slab_fix(s, "Object count adjusted.");
        }
        return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
                                                                int alloc)
{
        if (s->flags & SLAB_TRACE) {
                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
                        s->name,
                        alloc ? "alloc" : "free",
                        object, page->inuse,
                        page->freelist);

                if (!alloc)
                        print_section("Object ", (void *)object, s->object_size);

                dump_stack();
        }
}

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
        flags &= gfp_allowed_mask;
        lockdep_trace_alloc(flags);
        might_sleep_if(flags & __GFP_WAIT);

        return should_failslab(s->object_size, flags, s->flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
        kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
        kmemleak_free_recursive(x, s->flags);

        /*
         * Trouble is that we may no longer disable interrupts in the fast
         * path. So in order to make the debug calls that expect irqs to be
         * disabled we need to disable interrupts temporarily.
         */
#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
        {
                unsigned long flags;

                local_irq_save(flags);
                kmemcheck_slab_free(s, x, s->object_size);
                debug_check_no_locks_freed(x, s->object_size);
                local_irq_restore(flags);
        }
#endif
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(x, s->object_size);
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 *
 * list_lock must be held.
 */
static void add_full(struct kmem_cache *s,
        struct kmem_cache_node *n, struct page *page)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        list_add(&page->lru, &n->full);
}

/*
 * list_lock must be held.
 */
static void remove_full(struct kmem_cache *s, struct page *page)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
        struct kmem_cache_node *n = get_node(s, node);

        return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
        return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        /*
         * May be called early in order to allocate a slab for the
         * kmem_cache_node structure. Solve the chicken-egg
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
        if (n) {
                atomic_long_inc(&n->nr_slabs);
                atomic_long_add(objects, &n->total_objects);
        }
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        atomic_long_dec(&n->nr_slabs);
        atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                                                void *object)
{
        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
                return;

        init_object(s, object, SLUB_RED_INACTIVE);
        init_tracking(s, object);
}

static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
                                        void *object, unsigned long addr)
{
        if (!check_slab(s, page))
                goto bad;

        if (!check_valid_pointer(s, page, object)) {
                object_err(s, page, object, "Freelist Pointer check fails");
                goto bad;
        }

        if (!check_object(s, page, object, SLUB_RED_INACTIVE))
                goto bad;

        /* Success. Perform special debug activities for allocs */
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_ALLOC, addr);
        trace(s, page, object, 1);
        init_object(s, object, SLUB_RED_ACTIVE);
        return 1;

bad:
        if (PageSlab(page)) {
                /*
                 * If this is a slab page then lets do the best we can
                 * to avoid issues in the future. Marking all objects
                 * as used avoids touching the remaining objects.
                 */
                slab_fix(s, "Marking all objects used");
                page->inuse = page->objects;
                page->freelist = NULL;
        }
        return 0;
}

static noinline struct kmem_cache_node *free_debug_processing(
        struct kmem_cache *s, struct page *page, void *object,
        unsigned long addr, unsigned long *flags)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));

        spin_lock_irqsave(&n->list_lock, *flags);
        slab_lock(page);

        if (!check_slab(s, page))
                goto fail;

        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
        }

        if (on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already free");
                goto fail;
        }

        if (!check_object(s, page, object, SLUB_RED_ACTIVE))
                goto out;

        if (unlikely(s != page->slab)) {
                if (!PageSlab(page)) {
                        slab_err(s, page, "Attempt to free object(0x%p) "
                                "outside of slab", object);
                } else if (!page->slab) {
                        printk(KERN_ERR
                                "SLUB <none>: no slab for object 0x%p.\n",
                                                object);
                        dump_stack();
                } else
                        object_err(s, page, object,
                                        "page slab pointer corrupt.");
                goto fail;
        }

        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
        init_object(s, object, SLUB_RED_INACTIVE);
out:
        slab_unlock(page);
        /*
         * Keep node_lock to preserve integrity
         * until the object is actually freed
         */
        return n;

fail:
        slab_unlock(page);
        spin_unlock_irqrestore(&n->list_lock, *flags);
        slab_fix(s, "Object at 0x%p not freed", object);
        return NULL;
}

static int __init setup_slub_debug(char *str)
{
        slub_debug = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
                 */
                goto out;

        if (*str == ',')
                /*
                 * No options but restriction on slabs. This means full
                 * debugging for slabs matching a pattern.
                 */
                goto check_slabs;

        if (tolower(*str) == 'o') {
                /*
                 * Avoid enabling debugging on caches if its minimum order
                 * would increase as a result.
                 */
                disable_higher_order_debug = 1;
                goto out;
        }

        slub_debug = 0;
        if (*str == '-')
                /*
                 * Switch off all debugging measures.
                 */
                goto out;

        /*
         * Determine which debug features should be switched on
         */
        for (; *str && *str != ','; str++) {
                switch (tolower(*str)) {
                case 'f':
                        slub_debug |= SLAB_DEBUG_FREE;
                        break;
                case 'z':
                        slub_debug |= SLAB_RED_ZONE;
                        break;
                case 'p':
                        slub_debug |= SLAB_POISON;
                        break;
                case 'u':
                        slub_debug |= SLAB_STORE_USER;
                        break;
                case 't':
                        slub_debug |= SLAB_TRACE;
                        break;
                case 'a':
                        slub_debug |= SLAB_FAILSLAB;
                        break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
                }
        }

check_slabs:
        if (*str == ',')
                slub_debug_slabs = str + 1;
out:
        return 1;
}

__setup("slub_debug", setup_slub_debug);

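/*
 * Example boot parameter usage, based on the parsing above (a sketch,
 * not an exhaustive list; see Documentation/vm/slub.txt for the full
 * syntax):
 *
 *      slub_debug              enable the default debug options everywhere
 *      slub_debug=FZ           sanity checks (F) plus red zoning (Z)
 *      slub_debug=,dentry      default debug options, but only for caches
 *                              whose name matches "dentry"
 *      slub_debug=O            do not enable debugging for caches whose
 *                              minimum order would increase as a result
 */
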
static unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        /*
         * Enable debugging if selected on the kernel commandline.
         */
        if (slub_debug && (!slub_debug_slabs ||
                !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
                flags |= slub_debug;

        return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }

static inline struct kmem_cache_node *free_debug_processing(
        struct kmem_cache *s, struct page *page, void *object,
        unsigned long addr, unsigned long *flags) { return NULL; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                        { return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                        struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                                                        { return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
                                                        { return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}

static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
                                                        { return 0; }

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                void *object) {}

static inline void slab_free_hook(struct kmem_cache *s, void *x) {}

#endif /* CONFIG_SLUB_DEBUG */

Christoph Lameter81819f02007-05-06 14:49:36 -07001261/*
1262 * Slab allocation and freeing
1263 */
Christoph Lameter65c33762008-04-14 19:11:40 +03001264static inline struct page *alloc_slab_page(gfp_t flags, int node,
1265 struct kmem_cache_order_objects oo)
1266{
1267 int order = oo_order(oo);
1268
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001269 flags |= __GFP_NOTRACK;
1270
Christoph Lameter2154a332010-07-09 14:07:10 -05001271 if (node == NUMA_NO_NODE)
Christoph Lameter65c33762008-04-14 19:11:40 +03001272 return alloc_pages(flags, order);
1273 else
Minchan Kim6b65aaf2010-04-14 23:58:36 +09001274 return alloc_pages_exact_node(node, flags, order);
Christoph Lameter65c33762008-04-14 19:11:40 +03001275}
1276
Christoph Lameter81819f02007-05-06 14:49:36 -07001277static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1278{
Pekka Enberg06428782008-01-07 23:20:27 -08001279 struct page *page;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001280 struct kmem_cache_order_objects oo = s->oo;
Pekka Enbergba522702009-06-24 21:59:51 +03001281 gfp_t alloc_gfp;
Christoph Lameter81819f02007-05-06 14:49:36 -07001282
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001283 flags &= gfp_allowed_mask;
1284
1285 if (flags & __GFP_WAIT)
1286 local_irq_enable();
1287
Christoph Lameterb7a49f02008-02-14 14:21:32 -08001288 flags |= s->allocflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001289
Pekka Enbergba522702009-06-24 21:59:51 +03001290 /*
1291 * Let the initial higher-order allocation fail under memory pressure
1292	 * so we fall back to the minimum order allocation.
1293 */
1294 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1295
1296 page = alloc_slab_page(alloc_gfp, node, oo);
Christoph Lameter65c33762008-04-14 19:11:40 +03001297 if (unlikely(!page)) {
1298 oo = s->min;
1299 /*
1300 * Allocation may have failed due to fragmentation.
1301 * Try a lower order alloc if possible
1302 */
1303 page = alloc_slab_page(flags, node, oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001304
Christoph Lameter7e0528d2011-06-01 12:25:44 -05001305 if (page)
1306 stat(s, ORDER_FALLBACK);
Christoph Lameter65c33762008-04-14 19:11:40 +03001307 }
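	/*
	 * Worked example (illustrative; the numbers are assumptions, not taken
	 * from this file): a cache of 512 byte objects might use s->oo for an
	 * order 1 slab (8K, 16 objects) and s->min for an order 0 slab (4K,
	 * 8 objects). If the order 1 attempt above fails under memory
	 * pressure, the order 0 fallback keeps allocations working at reduced
	 * density.
	 */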
Vegard Nossum5a896d92008-04-04 00:54:48 +02001308
David Rientjes737b7192012-07-09 14:00:38 -07001309 if (kmemcheck_enabled && page
Amerigo Wang5086c389c2009-08-19 21:44:13 +03001310 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001311 int pages = 1 << oo_order(oo);
1312
1313 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1314
1315 /*
1316 * Objects from caches that have a constructor don't get
1317 * cleared when they're allocated, so we need to do it here.
1318 */
1319 if (s->ctor)
1320 kmemcheck_mark_uninitialized_pages(page, pages);
1321 else
1322 kmemcheck_mark_unallocated_pages(page, pages);
Vegard Nossum5a896d92008-04-04 00:54:48 +02001323 }
1324
David Rientjes737b7192012-07-09 14:00:38 -07001325 if (flags & __GFP_WAIT)
1326 local_irq_disable();
1327 if (!page)
1328 return NULL;
1329
Christoph Lameter834f3d12008-04-14 19:11:31 +03001330 page->objects = oo_objects(oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07001331 mod_zone_page_state(page_zone(page),
1332 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1333 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Christoph Lameter65c33762008-04-14 19:11:40 +03001334 1 << oo_order(oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07001335
1336 return page;
1337}
1338
1339static void setup_object(struct kmem_cache *s, struct page *page,
1340 void *object)
1341{
Christoph Lameter3ec09742007-05-16 22:11:00 -07001342 setup_object_debug(s, page, object);
Christoph Lameter4f104932007-05-06 14:50:17 -07001343 if (unlikely(s->ctor))
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07001344 s->ctor(object);
Christoph Lameter81819f02007-05-06 14:49:36 -07001345}
1346
1347static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1348{
1349 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07001350 void *start;
Christoph Lameter81819f02007-05-06 14:49:36 -07001351 void *last;
1352 void *p;
1353
Christoph Lameter6cb06222007-10-16 01:25:41 -07001354 BUG_ON(flags & GFP_SLAB_BUG_MASK);
Christoph Lameter81819f02007-05-06 14:49:36 -07001355
Christoph Lameter6cb06222007-10-16 01:25:41 -07001356 page = allocate_slab(s,
1357 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
Christoph Lameter81819f02007-05-06 14:49:36 -07001358 if (!page)
1359 goto out;
1360
Christoph Lameter205ab992008-04-14 19:11:40 +03001361 inc_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001362 page->slab = s;
Joonsoo Kimc03f94c2012-05-18 00:47:47 +09001363 __SetPageSlab(page);
Mel Gorman072bb0a2012-07-31 16:43:58 -07001364 if (page->pfmemalloc)
1365 SetPageSlabPfmemalloc(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001366
1367 start = page_address(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001368
1369 if (unlikely(s->flags & SLAB_POISON))
Christoph Lameter834f3d12008-04-14 19:11:31 +03001370 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
Christoph Lameter81819f02007-05-06 14:49:36 -07001371
1372 last = start;
Christoph Lameter224a88b2008-04-14 19:11:31 +03001373 for_each_object(p, s, start, page->objects) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001374 setup_object(s, page, last);
1375 set_freepointer(s, last, p);
1376 last = p;
1377 }
1378 setup_object(s, page, last);
Christoph Lametera973e9d2008-03-01 13:40:44 -08001379 set_freepointer(s, last, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07001380
1381 page->freelist = start;
Christoph Lametere6e82ea2011-08-09 16:12:24 -05001382 page->inuse = page->objects;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05001383 page->frozen = 1;
Christoph Lameter81819f02007-05-06 14:49:36 -07001384out:
Christoph Lameter81819f02007-05-06 14:49:36 -07001385 return page;
1386}
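/*
 * Sketch of the freelist that new_slab() builds above (illustrative only):
 *
 *	page->freelist -> obj0 -> obj1 -> ... -> objN-1 -> NULL
 *
 * Each arrow is the free pointer written by set_freepointer(). The page
 * starts out frozen with inuse == objects because the caller immediately
 * hands the whole freelist to a per cpu slab.
 */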
1387
1388static void __free_slab(struct kmem_cache *s, struct page *page)
1389{
Christoph Lameter834f3d12008-04-14 19:11:31 +03001390 int order = compound_order(page);
1391 int pages = 1 << order;
Christoph Lameter81819f02007-05-06 14:49:36 -07001392
Christoph Lameteraf537b02010-07-09 14:07:14 -05001393 if (kmem_cache_debug(s)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001394 void *p;
1395
1396 slab_pad_check(s, page);
Christoph Lameter224a88b2008-04-14 19:11:31 +03001397 for_each_object(p, s, page_address(page),
1398 page->objects)
Christoph Lameterf7cb1932010-09-29 07:15:01 -05001399 check_object(s, page, p, SLUB_RED_INACTIVE);
Christoph Lameter81819f02007-05-06 14:49:36 -07001400 }
1401
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001402 kmemcheck_free_shadow(page, compound_order(page));
Vegard Nossum5a896d92008-04-04 00:54:48 +02001403
Christoph Lameter81819f02007-05-06 14:49:36 -07001404 mod_zone_page_state(page_zone(page),
1405 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1406 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Pekka Enberg06428782008-01-07 23:20:27 -08001407 -pages);
Christoph Lameter81819f02007-05-06 14:49:36 -07001408
Mel Gorman072bb0a2012-07-31 16:43:58 -07001409 __ClearPageSlabPfmemalloc(page);
Christoph Lameter49bd5222008-04-14 18:52:18 +03001410 __ClearPageSlab(page);
1411 reset_page_mapcount(page);
Nick Piggin1eb5ac62009-05-05 19:13:44 +10001412 if (current->reclaim_state)
1413 current->reclaim_state->reclaimed_slab += pages;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001414 __free_pages(page, order);
Christoph Lameter81819f02007-05-06 14:49:36 -07001415}
1416
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001417#define need_reserve_slab_rcu \
1418 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1419
Christoph Lameter81819f02007-05-06 14:49:36 -07001420static void rcu_free_slab(struct rcu_head *h)
1421{
1422 struct page *page;
1423
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001424 if (need_reserve_slab_rcu)
1425 page = virt_to_head_page(h);
1426 else
1427 page = container_of((struct list_head *)h, struct page, lru);
1428
Christoph Lameter81819f02007-05-06 14:49:36 -07001429 __free_slab(page->slab, page);
1430}
1431
1432static void free_slab(struct kmem_cache *s, struct page *page)
1433{
1434 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
Lai Jiangshanda9a6382011-03-10 15:22:00 +08001435 struct rcu_head *head;
1436
1437 if (need_reserve_slab_rcu) {
1438 int order = compound_order(page);
1439 int offset = (PAGE_SIZE << order) - s->reserved;
1440
1441 VM_BUG_ON(s->reserved != sizeof(*head));
1442 head = page_address(page) + offset;
1443 } else {
1444 /*
1445 * RCU free overloads the RCU head over the LRU
1446 */
1447 head = (void *)&page->lru;
1448 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001449
1450 call_rcu(head, rcu_free_slab);
1451 } else
1452 __free_slab(s, page);
1453}
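/*
 * Layout sketch for the SLAB_DESTROY_BY_RCU case above (illustrative):
 * when struct rcu_head does not fit into page->lru, free_slab() uses the
 * s->reserved bytes at the very end of the slab instead:
 *
 *	|<--------------- PAGE_SIZE << order --------------->|
 *	| objects ...                       | struct rcu_head |
 *	                                    ^ page_address(page) + offset
 */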
1454
1455static void discard_slab(struct kmem_cache *s, struct page *page)
1456{
Christoph Lameter205ab992008-04-14 19:11:40 +03001457 dec_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001458 free_slab(s, page);
1459}
1460
1461/*
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001462 * Management of partially allocated slabs.
1463 *
1464 * list_lock must be held.
Christoph Lameter81819f02007-05-06 14:49:36 -07001465 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001466static inline void add_partial(struct kmem_cache_node *n,
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001467 struct page *page, int tail)
Christoph Lameter81819f02007-05-06 14:49:36 -07001468{
Christoph Lametere95eed52007-05-06 14:49:44 -07001469 n->nr_partial++;
Shaohua Li136333d2011-08-24 08:57:52 +08001470 if (tail == DEACTIVATE_TO_TAIL)
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001471 list_add_tail(&page->lru, &n->partial);
1472 else
1473 list_add(&page->lru, &n->partial);
Christoph Lameter81819f02007-05-06 14:49:36 -07001474}
1475
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05001476/*
1477 * list_lock must be held.
1478 */
1479static inline void remove_partial(struct kmem_cache_node *n,
Christoph Lameter62e346a2010-09-28 08:10:28 -05001480 struct page *page)
1481{
1482 list_del(&page->lru);
1483 n->nr_partial--;
1484}
1485
Christoph Lameter81819f02007-05-06 14:49:36 -07001486/*
Christoph Lameter7ced3712012-05-09 10:09:53 -05001487 * Remove slab from the partial list, freeze it and
1488 * return the pointer to the freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07001489 *
Christoph Lameter497b66f2011-08-09 16:12:26 -05001490 * Returns a list of objects or NULL if it fails.
1491 *
Christoph Lameter7ced3712012-05-09 10:09:53 -05001492 * Must hold list_lock since we modify the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07001493 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001494static inline void *acquire_slab(struct kmem_cache *s,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001495 struct kmem_cache_node *n, struct page *page,
Christoph Lameter49e22582011-08-09 16:12:27 -05001496 int mode)
Christoph Lameter81819f02007-05-06 14:49:36 -07001497{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001498 void *freelist;
1499 unsigned long counters;
1500 struct page new;
1501
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001502 /*
1503 * Zap the freelist and set the frozen bit.
1504 * The old freelist is the list of objects for the
1505 * per cpu allocation list.
1506 */
Christoph Lameter7ced3712012-05-09 10:09:53 -05001507 freelist = page->freelist;
1508 counters = page->counters;
1509 new.counters = counters;
Pekka Enberg23910c52012-06-04 10:14:58 +03001510 if (mode) {
Christoph Lameter7ced3712012-05-09 10:09:53 -05001511 new.inuse = page->objects;
Pekka Enberg23910c52012-06-04 10:14:58 +03001512 new.freelist = NULL;
1513 } else {
1514 new.freelist = freelist;
1515 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001516
Christoph Lameter7ced3712012-05-09 10:09:53 -05001517 VM_BUG_ON(new.frozen);
1518 new.frozen = 1;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001519
Christoph Lameter7ced3712012-05-09 10:09:53 -05001520 if (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001521 freelist, counters,
Joonsoo Kim02d76332012-05-17 00:13:02 +09001522 new.freelist, new.counters,
Christoph Lameter7ced3712012-05-09 10:09:53 -05001523 "acquire_slab"))
Christoph Lameter7ced3712012-05-09 10:09:53 -05001524 return NULL;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001525
1526 remove_partial(n, page);
Christoph Lameter7ced3712012-05-09 10:09:53 -05001527 WARN_ON(!freelist);
Christoph Lameter49e22582011-08-09 16:12:27 -05001528 return freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07001529}
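/*
 * Note on the mode argument (derived from the code above, for illustration):
 * with mode != 0 the caller takes the whole freelist for its per cpu slab
 * and page->freelist becomes NULL; with mode == 0 the objects stay attached
 * to the page so that it can be parked on a per cpu partial list via
 * put_cpu_partial().
 */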
1530
Christoph Lameter49e22582011-08-09 16:12:27 -05001531static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1532
Christoph Lameter81819f02007-05-06 14:49:36 -07001533/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001534 * Try to allocate a partial slab from a specific node.
Christoph Lameter81819f02007-05-06 14:49:36 -07001535 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001536static void *get_partial_node(struct kmem_cache *s,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001537 struct kmem_cache_node *n, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001538{
Christoph Lameter49e22582011-08-09 16:12:27 -05001539 struct page *page, *page2;
1540 void *object = NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001541
1542 /*
1543 * Racy check. If we mistakenly see no partial slabs then we
1544 * just allocate an empty slab. If we mistakenly try to get a
Christoph Lameter672bba32007-05-09 02:32:39 -07001545	 * partial slab and there is none available then get_partial_node()
1546 * will return NULL.
Christoph Lameter81819f02007-05-06 14:49:36 -07001547 */
1548 if (!n || !n->nr_partial)
1549 return NULL;
1550
1551 spin_lock(&n->list_lock);
Christoph Lameter49e22582011-08-09 16:12:27 -05001552 list_for_each_entry_safe(page, page2, &n->partial, lru) {
Alex,Shi12d79632011-09-07 10:26:36 +08001553 void *t = acquire_slab(s, n, page, object == NULL);
Christoph Lameter49e22582011-08-09 16:12:27 -05001554 int available;
1555
1556 if (!t)
1557 break;
1558
Alex,Shi12d79632011-09-07 10:26:36 +08001559 if (!object) {
Christoph Lameter49e22582011-08-09 16:12:27 -05001560 c->page = page;
Christoph Lameter49e22582011-08-09 16:12:27 -05001561 stat(s, ALLOC_FROM_PARTIAL);
Christoph Lameter49e22582011-08-09 16:12:27 -05001562 object = t;
1563 available = page->objects - page->inuse;
1564 } else {
Christoph Lameter49e22582011-08-09 16:12:27 -05001565 available = put_cpu_partial(s, page, 0);
Alex Shi8028dce2012-02-03 23:34:56 +08001566 stat(s, CPU_PARTIAL_NODE);
Christoph Lameter49e22582011-08-09 16:12:27 -05001567 }
1568 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1569 break;
1570
Christoph Lameter497b66f2011-08-09 16:12:26 -05001571 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001572 spin_unlock(&n->list_lock);
Christoph Lameter497b66f2011-08-09 16:12:26 -05001573 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07001574}
1575
1576/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001577 * Get a page from somewhere. Search in increasing NUMA distances.
Christoph Lameter81819f02007-05-06 14:49:36 -07001578 */
Joonsoo Kimde3ec032012-01-27 00:12:23 -08001579static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001580 struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001581{
1582#ifdef CONFIG_NUMA
1583 struct zonelist *zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07001584 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07001585 struct zone *zone;
1586 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter497b66f2011-08-09 16:12:26 -05001587 void *object;
Mel Gormancc9a6c82012-03-21 16:34:11 -07001588 unsigned int cpuset_mems_cookie;
Christoph Lameter81819f02007-05-06 14:49:36 -07001589
1590 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07001591 * The defrag ratio allows a configuration of the tradeoffs between
1592 * inter node defragmentation and node local allocations. A lower
1593 * defrag_ratio increases the tendency to do local allocations
1594 * instead of attempting to obtain partial slabs from other nodes.
Christoph Lameter81819f02007-05-06 14:49:36 -07001595 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001596 * If the defrag_ratio is set to 0 then kmalloc() always
1597 * returns node local objects. If the ratio is higher then kmalloc()
1598 * may return off node objects because partial slabs are obtained
1599 * from other nodes and filled up.
Christoph Lameter81819f02007-05-06 14:49:36 -07001600 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08001601 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
Christoph Lameter672bba32007-05-09 02:32:39 -07001602 * defrag_ratio = 1000) then every (well almost) allocation will
1603 * first attempt to defrag slab caches on other nodes. This means
1604 * scanning over all nodes to look for partial slabs which may be
1605 * expensive if we do it every time we are trying to find a slab
1606 * with available objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07001607 */
Christoph Lameter98246012008-01-07 23:20:26 -08001608 if (!s->remote_node_defrag_ratio ||
1609 get_cycles() % 1024 > s->remote_node_defrag_ratio)
Christoph Lameter81819f02007-05-06 14:49:36 -07001610 return NULL;
1611
Mel Gormancc9a6c82012-03-21 16:34:11 -07001612 do {
1613 cpuset_mems_cookie = get_mems_allowed();
Andi Kleene7b691b2012-06-09 02:40:03 -07001614 zonelist = node_zonelist(slab_node(), flags);
Mel Gormancc9a6c82012-03-21 16:34:11 -07001615 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1616 struct kmem_cache_node *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07001617
Mel Gormancc9a6c82012-03-21 16:34:11 -07001618 n = get_node(s, zone_to_nid(zone));
Christoph Lameter81819f02007-05-06 14:49:36 -07001619
Mel Gormancc9a6c82012-03-21 16:34:11 -07001620 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1621 n->nr_partial > s->min_partial) {
1622 object = get_partial_node(s, n, c);
1623 if (object) {
1624 /*
1625 * Return the object even if
1626 * put_mems_allowed indicated that
1627 * the cpuset mems_allowed was
1628 * updated in parallel. It's a
1629 * harmless race between the alloc
1630 * and the cpuset update.
1631 */
1632 put_mems_allowed(cpuset_mems_cookie);
1633 return object;
1634 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001635 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001636 }
Mel Gormancc9a6c82012-03-21 16:34:11 -07001637 } while (!put_mems_allowed(cpuset_mems_cookie));
Christoph Lameter81819f02007-05-06 14:49:36 -07001638#endif
1639 return NULL;
1640}
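/*
 * Worked example (illustrative; assumes the sysfs store scales the
 * percentage by 10, as the "defrag_ratio = 1000" comment above implies):
 * writing 20 to /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores
 * 200, so roughly 200 out of every 1024 calls (about 20%) go on to scan
 * remote nodes for partial slabs; a value of 0 never scans remote nodes.
 */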
1641
1642/*
1643 * Get a partial page, lock it and return it.
1644 */
Christoph Lameter497b66f2011-08-09 16:12:26 -05001645static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001646 struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001647{
Christoph Lameter497b66f2011-08-09 16:12:26 -05001648 void *object;
Christoph Lameter2154a332010-07-09 14:07:10 -05001649 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
Christoph Lameter81819f02007-05-06 14:49:36 -07001650
Christoph Lameter497b66f2011-08-09 16:12:26 -05001651 object = get_partial_node(s, get_node(s, searchnode), c);
1652 if (object || node != NUMA_NO_NODE)
1653 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07001654
Christoph Lameteracd19fd2011-08-09 16:12:25 -05001655 return get_any_partial(s, flags, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001656}
1657
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001658#ifdef CONFIG_PREEMPT
1659/*
1660 * Calculate the next globally unique transaction for disambiguation
1661 * during cmpxchg. The transactions start with the cpu number and are then
1662 * incremented by CONFIG_NR_CPUS.
1663 */
1664#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1665#else
1666/*
1667 * No preemption is supported, therefore there is also no need to check for
1668 * different cpus.
1669 */
1670#define TID_STEP 1
1671#endif
1672
1673static inline unsigned long next_tid(unsigned long tid)
1674{
1675 return tid + TID_STEP;
1676}
1677
1678static inline unsigned int tid_to_cpu(unsigned long tid)
1679{
1680 return tid % TID_STEP;
1681}
1682
1683static inline unsigned long tid_to_event(unsigned long tid)
1684{
1685 return tid / TID_STEP;
1686}
1687
1688static inline unsigned int init_tid(int cpu)
1689{
1690 return cpu;
1691}
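/*
 * Worked example (illustrative; assumes CONFIG_PREEMPT and CONFIG_NR_CPUS=4,
 * so TID_STEP is 4): cpu 2 starts with tid = 2 and advances 2 -> 6 -> 10.
 * tid_to_cpu(10) == 2 and tid_to_event(10) == 2, so a failed cmpxchg lets
 * note_cmpxchg_failure() tell a migration to another cpu apart from other
 * allocations or frees that ran on the same cpu in between.
 */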
1692
1693static inline void note_cmpxchg_failure(const char *n,
1694 const struct kmem_cache *s, unsigned long tid)
1695{
1696#ifdef SLUB_DEBUG_CMPXCHG
1697 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1698
1699 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1700
1701#ifdef CONFIG_PREEMPT
1702 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1703 printk("due to cpu change %d -> %d\n",
1704 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1705 else
1706#endif
1707 if (tid_to_event(tid) != tid_to_event(actual_tid))
1708 printk("due to cpu running other code. Event %ld->%ld\n",
1709 tid_to_event(tid), tid_to_event(actual_tid));
1710 else
1711 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1712 actual_tid, tid, next_tid(tid));
1713#endif
Christoph Lameter4fdccdf2011-03-22 13:35:00 -05001714 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001715}
1716
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001717void init_kmem_cache_cpus(struct kmem_cache *s)
1718{
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001719 int cpu;
1720
1721 for_each_possible_cpu(cpu)
1722 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06001723}
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001724
1725/*
1726 * Remove the cpu slab
1727 */
Christoph Lameterc17dda42012-05-09 10:09:57 -05001728static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
Christoph Lameter81819f02007-05-06 14:49:36 -07001729{
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001730 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001731 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1732 int lock = 0;
1733 enum slab_modes l = M_NONE, m = M_NONE;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001734 void *nextfree;
Shaohua Li136333d2011-08-24 08:57:52 +08001735 int tail = DEACTIVATE_TO_HEAD;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001736 struct page new;
1737 struct page old;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001738
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001739 if (page->freelist) {
Christoph Lameter84e554e62009-12-18 16:26:23 -06001740 stat(s, DEACTIVATE_REMOTE_FREES);
Shaohua Li136333d2011-08-24 08:57:52 +08001741 tail = DEACTIVATE_TO_TAIL;
Christoph Lameter894b8782007-05-10 03:15:16 -07001742 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001743
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001744 /*
1745 * Stage one: Free all available per cpu objects back
1746 * to the page freelist while it is still frozen. Leave the
1747 * last one.
1748 *
1749	 * There is no need to take the list_lock because the page
1750 * is still frozen.
1751 */
1752 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1753 void *prior;
1754 unsigned long counters;
1755
1756 do {
1757 prior = page->freelist;
1758 counters = page->counters;
1759 set_freepointer(s, freelist, prior);
1760 new.counters = counters;
1761 new.inuse--;
1762 VM_BUG_ON(!new.frozen);
1763
Christoph Lameter1d071712011-07-14 12:49:12 -05001764 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001765 prior, counters,
1766 freelist, new.counters,
1767 "drain percpu freelist"));
1768
1769 freelist = nextfree;
1770 }
1771
1772 /*
1773 * Stage two: Ensure that the page is unfrozen while the
1774 * list presence reflects the actual number of objects
1775 * during unfreeze.
1776 *
1777 * We setup the list membership and then perform a cmpxchg
1778	 * We set up the list membership and then perform a cmpxchg
1779 * is not unfrozen but the page is on the wrong list.
1780 *
1781 * Then we restart the process which may have to remove
1782 * the page from the list that we just put it on again
1783 * because the number of objects in the slab may have
1784 * changed.
1785 */
1786redo:
1787
1788 old.freelist = page->freelist;
1789 old.counters = page->counters;
1790 VM_BUG_ON(!old.frozen);
1791
1792 /* Determine target state of the slab */
1793 new.counters = old.counters;
1794 if (freelist) {
1795 new.inuse--;
1796 set_freepointer(s, freelist, old.freelist);
1797 new.freelist = freelist;
1798 } else
1799 new.freelist = old.freelist;
1800
1801 new.frozen = 0;
1802
Christoph Lameter81107182011-08-09 13:01:32 -05001803 if (!new.inuse && n->nr_partial > s->min_partial)
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001804 m = M_FREE;
1805 else if (new.freelist) {
1806 m = M_PARTIAL;
1807 if (!lock) {
1808 lock = 1;
1809 /*
1810			 * Taking the spinlock removes the possibility
1811 * that acquire_slab() will see a slab page that
1812 * is frozen
1813 */
1814 spin_lock(&n->list_lock);
1815 }
1816 } else {
1817 m = M_FULL;
1818 if (kmem_cache_debug(s) && !lock) {
1819 lock = 1;
1820 /*
1821 * This also ensures that the scanning of full
1822 * slabs from diagnostic functions will not see
1823 * any frozen slabs.
1824 */
1825 spin_lock(&n->list_lock);
1826 }
1827 }
1828
1829 if (l != m) {
1830
1831 if (l == M_PARTIAL)
1832
1833 remove_partial(n, page);
1834
1835 else if (l == M_FULL)
1836
1837 remove_full(s, page);
1838
1839 if (m == M_PARTIAL) {
1840
1841 add_partial(n, page, tail);
Shaohua Li136333d2011-08-24 08:57:52 +08001842 stat(s, tail);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001843
1844 } else if (m == M_FULL) {
1845
1846 stat(s, DEACTIVATE_FULL);
1847 add_full(s, n, page);
1848
1849 }
1850 }
1851
1852 l = m;
Christoph Lameter1d071712011-07-14 12:49:12 -05001853 if (!__cmpxchg_double_slab(s, page,
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001854 old.freelist, old.counters,
1855 new.freelist, new.counters,
1856 "unfreezing slab"))
1857 goto redo;
1858
Christoph Lameter2cfb7452011-06-01 12:25:52 -05001859 if (lock)
1860 spin_unlock(&n->list_lock);
1861
1862 if (m == M_FREE) {
1863 stat(s, DEACTIVATE_EMPTY);
1864 discard_slab(s, page);
1865 stat(s, FREE_SLAB);
1866 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001867}
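/*
 * Summary of deactivate_slab() above (illustrative): stage one drains the
 * remaining per cpu freelist back into the page while it is still frozen;
 * stage two clears the frozen bit with one more cmpxchg and, in the same
 * step, commits the page to the node partial list, the full list or to
 * being freed, retrying from redo: if other cpus freed objects meanwhile.
 */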
1868
Joonsoo Kimd24ac772012-05-18 22:01:17 +09001869/*
1870 * Unfreeze all the cpu partial slabs.
1871 *
1872 * This function must be called with interrupts disabled.
1873 */
Christoph Lameter49e22582011-08-09 16:12:27 -05001874static void unfreeze_partials(struct kmem_cache *s)
1875{
Joonsoo Kim43d77862012-06-09 02:23:16 +09001876 struct kmem_cache_node *n = NULL, *n2 = NULL;
Christoph Lameter49e22582011-08-09 16:12:27 -05001877 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
Shaohua Li9ada1932011-11-14 13:34:13 +08001878 struct page *page, *discard_page = NULL;
Christoph Lameter49e22582011-08-09 16:12:27 -05001879
1880 while ((page = c->partial)) {
Christoph Lameter49e22582011-08-09 16:12:27 -05001881 struct page new;
1882 struct page old;
1883
1884 c->partial = page->next;
Joonsoo Kim43d77862012-06-09 02:23:16 +09001885
1886 n2 = get_node(s, page_to_nid(page));
1887 if (n != n2) {
1888 if (n)
1889 spin_unlock(&n->list_lock);
1890
1891 n = n2;
1892 spin_lock(&n->list_lock);
1893 }
Christoph Lameter49e22582011-08-09 16:12:27 -05001894
1895 do {
1896
1897 old.freelist = page->freelist;
1898 old.counters = page->counters;
1899 VM_BUG_ON(!old.frozen);
1900
1901 new.counters = old.counters;
1902 new.freelist = old.freelist;
1903
1904 new.frozen = 0;
1905
Joonsoo Kimd24ac772012-05-18 22:01:17 +09001906 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter49e22582011-08-09 16:12:27 -05001907 old.freelist, old.counters,
1908 new.freelist, new.counters,
1909 "unfreezing slab"));
1910
Joonsoo Kim43d77862012-06-09 02:23:16 +09001911 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
Shaohua Li9ada1932011-11-14 13:34:13 +08001912 page->next = discard_page;
1913 discard_page = page;
Joonsoo Kim43d77862012-06-09 02:23:16 +09001914 } else {
1915 add_partial(n, page, DEACTIVATE_TO_TAIL);
1916 stat(s, FREE_ADD_PARTIAL);
Christoph Lameter49e22582011-08-09 16:12:27 -05001917 }
1918 }
1919
1920 if (n)
1921 spin_unlock(&n->list_lock);
Shaohua Li9ada1932011-11-14 13:34:13 +08001922
1923 while (discard_page) {
1924 page = discard_page;
1925 discard_page = discard_page->next;
1926
1927 stat(s, DEACTIVATE_EMPTY);
1928 discard_slab(s, page);
1929 stat(s, FREE_SLAB);
1930 }
Christoph Lameter49e22582011-08-09 16:12:27 -05001931}
1932
1933/*
1934 * Put a page that was just frozen (in __slab_free) into a partial page
1935 * slot if available. This is done without disabling interrupts and without
1936 * disabling preemption. The cmpxchg is racy and may put the partial page
1937 * onto a random cpu's partial slot.
1938 *
1939 * If we did not find a slot then simply move all the partials to the
1940 * per node partial list.
1941 */
1942int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1943{
1944 struct page *oldpage;
1945 int pages;
1946 int pobjects;
1947
1948 do {
1949 pages = 0;
1950 pobjects = 0;
1951 oldpage = this_cpu_read(s->cpu_slab->partial);
1952
1953 if (oldpage) {
1954 pobjects = oldpage->pobjects;
1955 pages = oldpage->pages;
1956 if (drain && pobjects > s->cpu_partial) {
1957 unsigned long flags;
1958 /*
1959 * partial array is full. Move the existing
1960 * set to the per node partial list.
1961 */
1962 local_irq_save(flags);
1963 unfreeze_partials(s);
1964 local_irq_restore(flags);
Joonsoo Kime24fc412012-06-23 03:22:38 +09001965 oldpage = NULL;
Christoph Lameter49e22582011-08-09 16:12:27 -05001966 pobjects = 0;
1967 pages = 0;
Alex Shi8028dce2012-02-03 23:34:56 +08001968 stat(s, CPU_PARTIAL_DRAIN);
Christoph Lameter49e22582011-08-09 16:12:27 -05001969 }
1970 }
1971
1972 pages++;
1973 pobjects += page->objects - page->inuse;
1974
1975 page->pages = pages;
1976 page->pobjects = pobjects;
1977 page->next = oldpage;
1978
Christoph Lameter933393f2011-12-22 11:58:51 -06001979 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
Christoph Lameter49e22582011-08-09 16:12:27 -05001980 return pobjects;
1981}
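/*
 * Usage sketch (derived from the callers, for illustration): __slab_free()
 * calls put_cpu_partial(s, page, 1) after freezing a page that just became
 * partially empty, and get_partial_node() calls it with drain == 0 while
 * refilling the per cpu partial list; once pobjects exceeds s->cpu_partial
 * the accumulated chain is handed to unfreeze_partials() above.
 */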
1982
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001983static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001984{
Christoph Lameter84e554e62009-12-18 16:26:23 -06001985 stat(s, CPUSLAB_FLUSH);
Christoph Lameterc17dda42012-05-09 10:09:57 -05001986 deactivate_slab(s, c->page, c->freelist);
1987
1988 c->tid = next_tid(c->tid);
1989 c->page = NULL;
1990 c->freelist = NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001991}
1992
1993/*
1994 * Flush cpu slab.
Christoph Lameter6446faa2008-02-15 23:45:26 -08001995 *
Christoph Lameter81819f02007-05-06 14:49:36 -07001996 * Called from IPI handler with interrupts disabled.
1997 */
Christoph Lameter0c710012007-07-17 04:03:24 -07001998static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
Christoph Lameter81819f02007-05-06 14:49:36 -07001999{
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002000 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameter81819f02007-05-06 14:49:36 -07002001
Christoph Lameter49e22582011-08-09 16:12:27 -05002002 if (likely(c)) {
2003 if (c->page)
2004 flush_slab(s, c);
2005
2006 unfreeze_partials(s);
2007 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002008}
2009
2010static void flush_cpu_slab(void *d)
2011{
2012 struct kmem_cache *s = d;
Christoph Lameter81819f02007-05-06 14:49:36 -07002013
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002014 __flush_cpu_slab(s, smp_processor_id());
Christoph Lameter81819f02007-05-06 14:49:36 -07002015}
2016
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002017static bool has_cpu_slab(int cpu, void *info)
2018{
2019 struct kmem_cache *s = info;
2020 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2021
majianpeng02e1a9c2012-05-17 17:03:26 -07002022 return c->page || c->partial;
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002023}
2024
Christoph Lameter81819f02007-05-06 14:49:36 -07002025static void flush_all(struct kmem_cache *s)
2026{
Gilad Ben-Yossefa8364d52012-03-28 14:42:44 -07002027 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
Christoph Lameter81819f02007-05-06 14:49:36 -07002028}
2029
2030/*
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002031 * Check if the objects in a per cpu structure fit numa
2032 * locality expectations.
2033 */
Christoph Lameter57d437d2012-05-09 10:09:59 -05002034static inline int node_match(struct page *page, int node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002035{
2036#ifdef CONFIG_NUMA
Christoph Lameter57d437d2012-05-09 10:09:59 -05002037 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002038 return 0;
2039#endif
2040 return 1;
2041}
2042
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002043static int count_free(struct page *page)
2044{
2045 return page->objects - page->inuse;
2046}
2047
2048static unsigned long count_partial(struct kmem_cache_node *n,
2049 int (*get_count)(struct page *))
2050{
2051 unsigned long flags;
2052 unsigned long x = 0;
2053 struct page *page;
2054
2055 spin_lock_irqsave(&n->list_lock, flags);
2056 list_for_each_entry(page, &n->partial, lru)
2057 x += get_count(page);
2058 spin_unlock_irqrestore(&n->list_lock, flags);
2059 return x;
2060}
2061
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04002062static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2063{
2064#ifdef CONFIG_SLUB_DEBUG
2065 return atomic_long_read(&n->total_objects);
2066#else
2067 return 0;
2068#endif
2069}
2070
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002071static noinline void
2072slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2073{
2074 int node;
2075
2076 printk(KERN_WARNING
2077 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2078 nid, gfpflags);
2079 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002080 "default order: %d, min order: %d\n", s->name, s->object_size,
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002081 s->size, oo_order(s->oo), oo_order(s->min));
2082
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002083 if (oo_order(s->min) > get_order(s->object_size))
David Rientjesfa5ec8a2009-07-07 00:14:14 -07002084 printk(KERN_WARNING " %s debugging increased min order, use "
2085 "slub_debug=O to disable.\n", s->name);
2086
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002087 for_each_online_node(node) {
2088 struct kmem_cache_node *n = get_node(s, node);
2089 unsigned long nr_slabs;
2090 unsigned long nr_objs;
2091 unsigned long nr_free;
2092
2093 if (!n)
2094 continue;
2095
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04002096 nr_free = count_partial(n, count_free);
2097 nr_slabs = node_nr_slabs(n);
2098 nr_objs = node_nr_objs(n);
Pekka Enberg781b2ba2009-06-10 18:50:32 +03002099
2100 printk(KERN_WARNING
2101 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
2102 node, nr_slabs, nr_objs, nr_free);
2103 }
2104}
2105
Christoph Lameter497b66f2011-08-09 16:12:26 -05002106static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2107 int node, struct kmem_cache_cpu **pc)
2108{
Christoph Lameter6faa6832012-05-09 10:09:51 -05002109 void *freelist;
Christoph Lameter188fd062012-05-09 10:09:55 -05002110 struct kmem_cache_cpu *c = *pc;
2111 struct page *page;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002112
Christoph Lameter188fd062012-05-09 10:09:55 -05002113 freelist = get_partial(s, flags, node, c);
2114
2115 if (freelist)
2116 return freelist;
2117
2118 page = new_slab(s, flags, node);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002119 if (page) {
2120 c = __this_cpu_ptr(s->cpu_slab);
2121 if (c->page)
2122 flush_slab(s, c);
2123
2124 /*
2125 * No other reference to the page yet so we can
2126 * muck around with it freely without cmpxchg
2127 */
Christoph Lameter6faa6832012-05-09 10:09:51 -05002128 freelist = page->freelist;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002129 page->freelist = NULL;
2130
2131 stat(s, ALLOC_SLAB);
Christoph Lameter497b66f2011-08-09 16:12:26 -05002132 c->page = page;
2133 *pc = c;
2134 } else
Christoph Lameter6faa6832012-05-09 10:09:51 -05002135 freelist = NULL;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002136
Christoph Lameter6faa6832012-05-09 10:09:51 -05002137 return freelist;
Christoph Lameter497b66f2011-08-09 16:12:26 -05002138}
2139
Mel Gorman072bb0a2012-07-31 16:43:58 -07002140static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2141{
2142 if (unlikely(PageSlabPfmemalloc(page)))
2143 return gfp_pfmemalloc_allowed(gfpflags);
2144
2145 return true;
2146}
2147
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002148/*
Christoph Lameter213eeb92011-11-11 14:07:14 -06002149 * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
2150 * or deactivate the page.
2151 *
2152 * The page is still frozen if the return value is not NULL.
2153 *
2154 * If this function returns NULL then the page has been unfrozen.
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002155 *
2156 * This function must be called with interrupts disabled.
Christoph Lameter213eeb92011-11-11 14:07:14 -06002157 */
2158static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2159{
2160 struct page new;
2161 unsigned long counters;
2162 void *freelist;
2163
2164 do {
2165 freelist = page->freelist;
2166 counters = page->counters;
Christoph Lameter6faa6832012-05-09 10:09:51 -05002167
Christoph Lameter213eeb92011-11-11 14:07:14 -06002168 new.counters = counters;
2169 VM_BUG_ON(!new.frozen);
2170
2171 new.inuse = page->objects;
2172 new.frozen = freelist != NULL;
2173
Joonsoo Kimd24ac772012-05-18 22:01:17 +09002174 } while (!__cmpxchg_double_slab(s, page,
Christoph Lameter213eeb92011-11-11 14:07:14 -06002175 freelist, counters,
2176 NULL, new.counters,
2177 "get_freelist"));
2178
2179 return freelist;
2180}
2181
2182/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002183 * Slow path. The lockless freelist is empty or we need to perform
2184 * debugging duties.
Christoph Lameter81819f02007-05-06 14:49:36 -07002185 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002186 * Processing is still very fast if new objects have been freed to the
2187 * regular freelist. In that case we simply take over the regular freelist
2188 * as the lockless freelist and zap the regular freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07002189 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002190 * If that is not working then we fall back to the partial lists. We take the
2191 * first element of the freelist as the object to allocate now and move the
2192 * rest of the freelist to the lockless freelist.
2193 *
2194 * And if we were unable to get a new slab from the partial slab lists then
Christoph Lameter6446faa2008-02-15 23:45:26 -08002195 * we need to allocate a new slab. This is the slowest path since it involves
2196 * a call to the page allocator and the setup of a new slab.
Christoph Lameter81819f02007-05-06 14:49:36 -07002197 */
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002198static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2199 unsigned long addr, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07002200{
Christoph Lameter6faa6832012-05-09 10:09:51 -05002201 void *freelist;
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002202 struct page *page;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002203 unsigned long flags;
2204
2205 local_irq_save(flags);
2206#ifdef CONFIG_PREEMPT
2207 /*
2208 * We may have been preempted and rescheduled on a different
2209 * cpu before disabling interrupts. Need to reload cpu area
2210 * pointer.
2211 */
2212 c = this_cpu_ptr(s->cpu_slab);
2213#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002214
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002215 page = c->page;
2216 if (!page)
Christoph Lameter81819f02007-05-06 14:49:36 -07002217 goto new_slab;
Christoph Lameter49e22582011-08-09 16:12:27 -05002218redo:
Christoph Lameter6faa6832012-05-09 10:09:51 -05002219
Christoph Lameter57d437d2012-05-09 10:09:59 -05002220 if (unlikely(!node_match(page, node))) {
Christoph Lametere36a2652011-06-01 12:25:57 -05002221 stat(s, ALLOC_NODE_MISMATCH);
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002222 deactivate_slab(s, page, c->freelist);
Christoph Lameterc17dda42012-05-09 10:09:57 -05002223 c->page = NULL;
2224 c->freelist = NULL;
Christoph Lameterfc59c052011-06-01 12:25:56 -05002225 goto new_slab;
2226 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08002227
Mel Gorman072bb0a2012-07-31 16:43:58 -07002228 /*
2229 * By rights, we should be searching for a slab page that was
2230 * PFMEMALLOC but right now, we are losing the pfmemalloc
2231 * information when the page leaves the per-cpu allocator
2232 */
2233 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2234 deactivate_slab(s, page, c->freelist);
2235 c->page = NULL;
2236 c->freelist = NULL;
2237 goto new_slab;
2238 }
2239
Eric Dumazet73736e02011-12-13 04:57:06 +01002240 /* must check again c->freelist in case of cpu migration or IRQ */
Christoph Lameter6faa6832012-05-09 10:09:51 -05002241 freelist = c->freelist;
2242 if (freelist)
Eric Dumazet73736e02011-12-13 04:57:06 +01002243 goto load_freelist;
2244
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002245 stat(s, ALLOC_SLOWPATH);
2246
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002247 freelist = get_freelist(s, page);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002248
Christoph Lameter6faa6832012-05-09 10:09:51 -05002249 if (!freelist) {
Christoph Lameter03e404a2011-06-01 12:25:58 -05002250 c->page = NULL;
2251 stat(s, DEACTIVATE_BYPASS);
Christoph Lameterfc59c052011-06-01 12:25:56 -05002252 goto new_slab;
Christoph Lameter03e404a2011-06-01 12:25:58 -05002253 }
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002254
Christoph Lameter81819f02007-05-06 14:49:36 -07002255 stat(s, ALLOC_REFILL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08002256
Christoph Lameter894b8782007-05-10 03:15:16 -07002257load_freelist:
Christoph Lameter507effe2012-05-09 10:09:52 -05002258 /*
2259 * freelist is pointing to the list of objects to be used.
2260 * page is pointing to the page from which the objects are obtained.
2261 * That page must be frozen for per cpu allocations to work.
2262 */
2263 VM_BUG_ON(!c->page->frozen);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002264 c->freelist = get_freepointer(s, freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002265 c->tid = next_tid(c->tid);
2266 local_irq_restore(flags);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002267 return freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07002268
Christoph Lameter81819f02007-05-06 14:49:36 -07002269new_slab:
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002270
Christoph Lameter49e22582011-08-09 16:12:27 -05002271 if (c->partial) {
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002272 page = c->page = c->partial;
2273 c->partial = page->next;
Christoph Lameter49e22582011-08-09 16:12:27 -05002274 stat(s, CPU_PARTIAL_ALLOC);
2275 c->freelist = NULL;
2276 goto redo;
Christoph Lameter81819f02007-05-06 14:49:36 -07002277 }
2278
Christoph Lameter188fd062012-05-09 10:09:55 -05002279 freelist = new_slab_objects(s, gfpflags, node, &c);
Christoph Lameterb811c202007-10-16 23:25:51 -07002280
Christoph Lameterf46974362012-05-09 10:09:54 -05002281 if (unlikely(!freelist)) {
2282 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2283 slab_out_of_memory(s, gfpflags, node);
Christoph Lameter01ad8a72011-04-15 14:48:14 -05002284
Christoph Lameterf46974362012-05-09 10:09:54 -05002285 local_irq_restore(flags);
2286 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07002287 }
Christoph Lameter894b8782007-05-10 03:15:16 -07002288
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002289 page = c->page;
Christoph Lameter5091b742012-07-31 16:44:00 -07002290 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
Christoph Lameter81819f02007-05-06 14:49:36 -07002291 goto load_freelist;
Christoph Lameter894b8782007-05-10 03:15:16 -07002292
Christoph Lameter497b66f2011-08-09 16:12:26 -05002293 /* Only entered in the debug case */
Christoph Lameter5091b742012-07-31 16:44:00 -07002294 if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
Christoph Lameter497b66f2011-08-09 16:12:26 -05002295 goto new_slab; /* Slab failed checks. Next slab needed */
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002296
Christoph Lameterf6e7def2012-05-09 10:09:58 -05002297 deactivate_slab(s, page, get_freepointer(s, freelist));
Christoph Lameterc17dda42012-05-09 10:09:57 -05002298 c->page = NULL;
2299 c->freelist = NULL;
Christoph Lametera71ae472011-05-25 09:47:43 -05002300 local_irq_restore(flags);
Christoph Lameter6faa6832012-05-09 10:09:51 -05002301 return freelist;
Christoph Lameter894b8782007-05-10 03:15:16 -07002302}
2303
2304/*
2305 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2306 * have the fastpath folded into their functions. So no function call
2307 * overhead for requests that can be satisfied on the fastpath.
2308 *
2309 * The fastpath works by first checking if the lockless freelist can be used.
2310 * If not then __slab_alloc is called for slow processing.
2311 *
2312 * Otherwise we can simply pick the next object from the lockless free list.
2313 */
Pekka Enberg06428782008-01-07 23:20:27 -08002314static __always_inline void *slab_alloc(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002315 gfp_t gfpflags, int node, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07002316{
Christoph Lameter894b8782007-05-10 03:15:16 -07002317 void **object;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002318 struct kmem_cache_cpu *c;
Christoph Lameter57d437d2012-05-09 10:09:59 -05002319 struct page *page;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002320 unsigned long tid;
Christoph Lameter1f842602008-01-07 23:20:30 -08002321
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002322 if (slab_pre_alloc_hook(s, gfpflags))
Akinobu Mita773ff602008-12-23 19:37:01 +09002323 return NULL;
2324
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002325redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002326
2327 /*
2328 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2329 * enabled. We may switch back and forth between cpus while
2330 * reading from one cpu area. That does not matter as long
2331 * as we end up on the original cpu again when doing the cmpxchg.
2332 */
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002333 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002334
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002335 /*
2336 * The transaction ids are globally unique per cpu and per operation on
2337	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2338 * occurs on the right processor and that there was no operation on the
2339 * linked list in between.
2340 */
2341 tid = c->tid;
2342 barrier();
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002343
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002344 object = c->freelist;
Christoph Lameter57d437d2012-05-09 10:09:59 -05002345 page = c->page;
Christoph Lameter5091b742012-07-31 16:44:00 -07002346 if (unlikely(!object || !node_match(page, node)))
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002347 object = __slab_alloc(s, gfpflags, node, addr, c);
Christoph Lameter894b8782007-05-10 03:15:16 -07002348
2349 else {
Eric Dumazet0ad95002011-12-16 16:25:34 +01002350 void *next_object = get_freepointer_safe(s, object);
2351
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002352 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002353 * The cmpxchg will only match if there was no additional
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002354 * operation and if we are on the right processor.
2355 *
2356 * The cmpxchg does the following atomically (without lock semantics!)
2357 * 1. Relocate first pointer to the current per cpu area.
2358 * 2. Verify that tid and freelist have not been changed
2359 * 3. If they were not changed replace tid and freelist
2360 *
2361 * Since this is without lock semantics the protection is only against
2362 * code executing on this cpu *not* from access by other cpus.
2363 */
Christoph Lameter933393f2011-12-22 11:58:51 -06002364 if (unlikely(!this_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002365 s->cpu_slab->freelist, s->cpu_slab->tid,
2366 object, tid,
Eric Dumazet0ad95002011-12-16 16:25:34 +01002367 next_object, next_tid(tid)))) {
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002368
2369 note_cmpxchg_failure("slab_alloc", s, tid);
2370 goto redo;
2371 }
Eric Dumazet0ad95002011-12-16 16:25:34 +01002372 prefetch_freepointer(s, next_object);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002373 stat(s, ALLOC_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07002374 }
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002375
Pekka Enberg74e21342009-11-25 20:14:48 +02002376 if (unlikely(gfpflags & __GFP_ZERO) && object)
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002377 memset(object, 0, s->object_size);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07002378
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002379 slab_post_alloc_hook(s, gfpflags, object);
Vegard Nossum5a896d92008-04-04 00:54:48 +02002380
Christoph Lameter894b8782007-05-10 03:15:16 -07002381 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07002382}
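/*
 * Fastpath summary (illustrative, derived from the code above): on
 * architectures with a double word cmpxchg the (freelist, tid) pair is
 * swapped in one instruction on the per cpu area, so a fastpath hit needs
 * no lock, no irq disabling and no atomics on struct page; only a tid or
 * freelist mismatch forces the redo loop or the __slab_alloc() slow path.
 */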
2383
2384void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2385{
Christoph Lameter2154a332010-07-09 14:07:10 -05002386 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002387
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002388 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002389
2390 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002391}
2392EXPORT_SYMBOL(kmem_cache_alloc);
2393
Li Zefan0f24f122009-12-11 15:45:30 +08002394#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01002395void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002396{
Richard Kennedy4a923792010-10-21 10:29:19 +01002397 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2398 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2399 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002400}
Richard Kennedy4a923792010-10-21 10:29:19 +01002401EXPORT_SYMBOL(kmem_cache_alloc_trace);
2402
2403void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2404{
2405 void *ret = kmalloc_order(size, flags, order);
2406 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2407 return ret;
2408}
2409EXPORT_SYMBOL(kmalloc_order_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002410#endif
2411
Christoph Lameter81819f02007-05-06 14:49:36 -07002412#ifdef CONFIG_NUMA
2413void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2414{
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002415 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2416
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002417 trace_kmem_cache_alloc_node(_RET_IP_, ret,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002418 s->object_size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002419
2420 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002421}
2422EXPORT_SYMBOL(kmem_cache_alloc_node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002423
Li Zefan0f24f122009-12-11 15:45:30 +08002424#ifdef CONFIG_TRACING
Richard Kennedy4a923792010-10-21 10:29:19 +01002425void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002426 gfp_t gfpflags,
Richard Kennedy4a923792010-10-21 10:29:19 +01002427 int node, size_t size)
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002428{
Richard Kennedy4a923792010-10-21 10:29:19 +01002429 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2430
2431 trace_kmalloc_node(_RET_IP_, ret,
2432 size, s->size, gfpflags, node);
2433 return ret;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002434}
Richard Kennedy4a923792010-10-21 10:29:19 +01002435EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002436#endif
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09002437#endif
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002438
Christoph Lameter81819f02007-05-06 14:49:36 -07002439/*
Christoph Lameter894b8782007-05-10 03:15:16 -07002440 * Slow path handling. This may still be called frequently since objects
2441 * have a longer lifetime than the cpu slabs in most processing loads.
Christoph Lameter81819f02007-05-06 14:49:36 -07002442 *
Christoph Lameter894b8782007-05-10 03:15:16 -07002443 * So we still attempt to reduce cache line usage. Just take the slab
2444 * lock and free the item. If there is no additional partial page
2445 * handling required then we can return immediately.
Christoph Lameter81819f02007-05-06 14:49:36 -07002446 */
Christoph Lameter894b8782007-05-10 03:15:16 -07002447static void __slab_free(struct kmem_cache *s, struct page *page,
Christoph Lameterff120592009-12-18 16:26:22 -06002448 void *x, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07002449{
2450 void *prior;
2451 void **object = (void *)x;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002452 int was_frozen;
2453 int inuse;
2454 struct page new;
2455 unsigned long counters;
2456 struct kmem_cache_node *n = NULL;
Christoph Lameter61728d12011-06-01 12:25:51 -05002457 unsigned long uninitialized_var(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002458
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002459 stat(s, FREE_SLOWPATH);
Christoph Lameter81819f02007-05-06 14:49:36 -07002460
Christoph Lameter19c7ff92012-05-30 12:54:46 -05002461 if (kmem_cache_debug(s) &&
2462 !(n = free_debug_processing(s, page, x, addr, &flags)))
Christoph Lameter80f08c12011-06-01 12:25:55 -05002463 return;
Christoph Lameter6446faa2008-02-15 23:45:26 -08002464
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002465 do {
2466 prior = page->freelist;
2467 counters = page->counters;
2468 set_freepointer(s, object, prior);
2469 new.counters = counters;
2470 was_frozen = new.frozen;
2471 new.inuse--;
2472 if ((!new.inuse || !prior) && !was_frozen && !n) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002473
2474 if (!kmem_cache_debug(s) && !prior)
2475
2476 /*
2477 * Slab was on no list before and will be partially empty.
2478 * We can defer the list move and instead freeze it.
2479 */
2480 new.frozen = 1;
2481
2482 else { /* Needs to be taken off a list */
2483
2484 n = get_node(s, page_to_nid(page));
2485 /*
2486 * Speculatively acquire the list_lock.
2487 * If the cmpxchg does not succeed then we may
2488 * drop the list_lock without any processing.
2489 *
2490 * Otherwise the list_lock will synchronize with
2491 * other processors updating the list of slabs.
2492 */
2493 spin_lock_irqsave(&n->list_lock, flags);
2494
2495 }
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002496 }
2497 inuse = new.inuse;
Christoph Lameter81819f02007-05-06 14:49:36 -07002498
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002499 } while (!cmpxchg_double_slab(s, page,
2500 prior, counters,
2501 object, new.counters,
2502 "__slab_free"));
Christoph Lameter81819f02007-05-06 14:49:36 -07002503
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002504 if (likely(!n)) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002505
2506 /*
2507 * If we just froze the page then put it onto the
2508 * per cpu partial list.
2509 */
Alex Shi8028dce2012-02-03 23:34:56 +08002510 if (new.frozen && !was_frozen) {
Christoph Lameter49e22582011-08-09 16:12:27 -05002511 put_cpu_partial(s, page, 1);
Alex Shi8028dce2012-02-03 23:34:56 +08002512 stat(s, CPU_PARTIAL_FREE);
2513 }
Christoph Lameter49e22582011-08-09 16:12:27 -05002514 /*
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002515 * The list lock was not taken, therefore no list
2516 * activity is necessary.
2517 */
2518 if (was_frozen)
2519 stat(s, FREE_FROZEN);
Christoph Lameter80f08c12011-06-01 12:25:55 -05002520 return;
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002521 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002522
2523 /*
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002524 * was_frozen may have been set after we acquired the list_lock in
2525 * an earlier loop. So we need to check it here again.
Christoph Lameter81819f02007-05-06 14:49:36 -07002526 */
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002527 if (was_frozen)
2528 stat(s, FREE_FROZEN);
2529 else {
2530 if (unlikely(!inuse && n->nr_partial > s->min_partial))
2531 goto slab_empty;
Christoph Lameter81819f02007-05-06 14:49:36 -07002532
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002533 /*
2534 * Objects left in the slab. If it was not on the partial list before
2535 * then add it.
2536 */
2537 if (unlikely(!prior)) {
2538 remove_full(s, page);
Shaohua Li136333d2011-08-24 08:57:52 +08002539 add_partial(n, page, DEACTIVATE_TO_TAIL);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002540 stat(s, FREE_ADD_PARTIAL);
2541 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002542 }
Christoph Lameter80f08c12011-06-01 12:25:55 -05002543 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002544 return;
2545
2546slab_empty:
Christoph Lametera973e9d2008-03-01 13:40:44 -08002547 if (prior) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002548 /*
Christoph Lameter6fbabb22011-08-08 11:16:56 -05002549 * Slab on the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07002550 */
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05002551 remove_partial(n, page);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002552 stat(s, FREE_REMOVE_PARTIAL);
Christoph Lameter6fbabb22011-08-08 11:16:56 -05002553 } else
2554 /* Slab must be on the full list */
2555 remove_full(s, page);
Christoph Lameter2cfb7452011-06-01 12:25:52 -05002556
Christoph Lameter80f08c12011-06-01 12:25:55 -05002557 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter84e554e62009-12-18 16:26:23 -06002558 stat(s, FREE_SLAB);
Christoph Lameter81819f02007-05-06 14:49:36 -07002559 discard_slab(s, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07002560}
2561
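/*
 * Illustrative walk through the common no-debug case above (an example
 * added for clarity, not part of the original flow): an object is freed
 * into a slab that was completely full (prior == NULL) and not frozen.
 * The loop builds new.counters with inuse decremented and, because the
 * slab sat on no list, sets new.frozen = 1 without taking any list_lock.
 * Once the cmpxchg_double succeeds, n is still NULL, so the early-return
 * branch calls put_cpu_partial() and accounts CPU_PARTIAL_FREE.
 */
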
Christoph Lameter894b8782007-05-10 03:15:16 -07002562/*
2563 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2564 * can perform fastpath freeing without additional function calls.
2565 *
2566 * The fastpath is only possible if we are freeing to the current cpu slab
2567 * of this processor. This is typically the case if we have just allocated
2568 * the item before.
2569 *
2570 * If fastpath is not possible then fall back to __slab_free where we deal
2571 * with all sorts of special processing.
2572 */
Pekka Enberg06428782008-01-07 23:20:27 -08002573static __always_inline void slab_free(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002574 struct page *page, void *x, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07002575{
2576 void **object = (void *)x;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002577 struct kmem_cache_cpu *c;
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002578 unsigned long tid;
Christoph Lameter1f842602008-01-07 23:20:30 -08002579
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002580 slab_free_hook(s, x);
2581
Christoph Lametera24c5a02011-03-15 12:45:21 -05002582redo:
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002583 /*
2584 * Determine the current cpu's per cpu slab.
2585 * The cpu may change afterward. However that does not matter since
2586 * data is retrieved via this pointer. If we are on the same cpu
2587 * during the cmpxchg then the free will succeed.
2588 */
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002589 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002590
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002591 tid = c->tid;
2592 barrier();
Christoph Lameterc016b0b2010-08-20 12:37:16 -05002593
Christoph Lameter442b06b2011-05-17 16:29:31 -05002594 if (likely(page == c->page)) {
Christoph Lameterff120592009-12-18 16:26:22 -06002595 set_freepointer(s, object, c->freelist);
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002596
Christoph Lameter933393f2011-12-22 11:58:51 -06002597 if (unlikely(!this_cpu_cmpxchg_double(
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002598 s->cpu_slab->freelist, s->cpu_slab->tid,
2599 c->freelist, tid,
2600 object, next_tid(tid)))) {
2601
2602 note_cmpxchg_failure("slab_free", s, tid);
2603 goto redo;
2604 }
Christoph Lameter84e554e62009-12-18 16:26:23 -06002605 stat(s, FREE_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07002606 } else
Christoph Lameterff120592009-12-18 16:26:22 -06002607 __slab_free(s, page, x, addr);
Christoph Lameter894b8782007-05-10 03:15:16 -07002608
Christoph Lameter894b8782007-05-10 03:15:16 -07002609}
2610
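/*
 * Example of how the tid check above protects the fastpath (a sketch,
 * assuming the usual SLUB tid scheme in which each cpu draws tids from a
 * disjoint sequence): a thread samples c->tid on CPU0, is preempted and
 * migrated to CPU1 before the cmpxchg. The this_cpu_cmpxchg_double() then
 * operates on CPU1's kmem_cache_cpu, whose tid can never equal the value
 * sampled on CPU0, so the cmpxchg fails and the free retries via the redo
 * label with fresh cpu-local data.
 */
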
Christoph Lameter81819f02007-05-06 14:49:36 -07002611void kmem_cache_free(struct kmem_cache *s, void *x)
2612{
Christoph Lameter77c5e2d2007-05-06 14:49:42 -07002613 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07002614
Christoph Lameterb49af682007-05-06 14:49:41 -07002615 page = virt_to_head_page(x);
Christoph Lameter81819f02007-05-06 14:49:36 -07002616
Christoph Lameter79576102012-09-04 23:06:14 +00002617 if (kmem_cache_debug(s) && page->slab != s) {
2618 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
2619 " is from %s\n", page->slab->name, s->name);
2620 WARN_ON_ONCE(1);
2621 return;
2622 }
2623
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002624 slab_free(s, page, x, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002625
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02002626 trace_kmem_cache_free(_RET_IP_, x);
Christoph Lameter81819f02007-05-06 14:49:36 -07002627}
2628EXPORT_SYMBOL(kmem_cache_free);
2629
Christoph Lameter81819f02007-05-06 14:49:36 -07002630/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002631 * Object placement in a slab is made very easy because we always start at
2632 * offset 0. If we tune the size of the object to the alignment then we can
2633 * get the required alignment by putting one properly sized object after
2634 * another.
Christoph Lameter81819f02007-05-06 14:49:36 -07002635 *
2636 * Notice that the allocation order determines the sizes of the per cpu
2637 * caches. Each processor always has one slab available for allocations.
2638 * Increasing the allocation order reduces the number of times that slabs
Christoph Lameter672bba32007-05-09 02:32:39 -07002639 * must be moved on and off the partial lists and is therefore a factor in
Christoph Lameter81819f02007-05-06 14:49:36 -07002640 * locking overhead.
Christoph Lameter81819f02007-05-06 14:49:36 -07002641 */
2642
2643/*
2644 * Minimum / Maximum order of slab pages. This influences locking overhead
2645 * and slab fragmentation. A higher order reduces the number of partial slabs
2646 * and increases the number of allocations possible without having to
2647 * take the list_lock.
2648 */
2649static int slub_min_order;
Christoph Lameter114e9e82008-04-14 19:11:41 +03002650static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03002651static int slub_min_objects;
Christoph Lameter81819f02007-05-06 14:49:36 -07002652
2653/*
2654 * Merge control. If this is set then no merging of slab caches will occur.
Christoph Lameter672bba32007-05-09 02:32:39 -07002655 * (Could be removed. This was introduced to pacify the merge skeptics.)
Christoph Lameter81819f02007-05-06 14:49:36 -07002656 */
2657static int slub_nomerge;
2658
2659/*
Christoph Lameter81819f02007-05-06 14:49:36 -07002660 * Calculate the order of allocation given a slab object size.
2661 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002662 * The order of allocation has significant impact on performance and other
2663 * system components. Generally order 0 allocations should be preferred since
2664 * order 0 does not cause fragmentation in the page allocator. Larger objects
2665 * can be problematic to put into order 0 slabs because there may be too much
Christoph Lameterc124f5b2008-04-14 19:13:29 +03002666 * unused space left. We go to a higher order if more than 1/16th of the slab
Christoph Lameter672bba32007-05-09 02:32:39 -07002667 * would be wasted.
Christoph Lameter81819f02007-05-06 14:49:36 -07002668 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002669 * In order to reach satisfactory performance we must ensure that a minimum
2670 * number of objects is in one slab. Otherwise we may generate too much
2671 * activity on the partial lists which requires taking the list_lock. This is
2672 * less a concern for large slabs though which are rarely used.
Christoph Lameter81819f02007-05-06 14:49:36 -07002673 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002674 * slub_max_order specifies the order where we begin to stop considering the
2675 * number of objects in a slab as critical. If we reach slub_max_order then
2676 * we try to keep the page order as low as possible. So we accept more waste
2677 * of space in favor of a small page order.
2678 *
2679 * Higher order allocations also allow the placement of more objects in a
2680 * slab and thereby reduce object handling overhead. If the user has
2681 * requested a higher minimum order then we start with that one instead of
2682 * the smallest order which will fit the object.
Christoph Lameter81819f02007-05-06 14:49:36 -07002683 */
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002684static inline int slab_order(int size, int min_objects,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002685 int max_order, int fract_leftover, int reserved)
Christoph Lameter81819f02007-05-06 14:49:36 -07002686{
2687 int order;
2688 int rem;
Christoph Lameter6300ea72007-07-17 04:03:20 -07002689 int min_order = slub_min_order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002690
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002691 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +04002692 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
Christoph Lameter39b26462008-04-14 19:11:30 +03002693
Christoph Lameter6300ea72007-07-17 04:03:20 -07002694 for (order = max(min_order,
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002695 fls(min_objects * size - 1) - PAGE_SHIFT);
2696 order <= max_order; order++) {
2697
Christoph Lameter81819f02007-05-06 14:49:36 -07002698 unsigned long slab_size = PAGE_SIZE << order;
2699
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002700 if (slab_size < min_objects * size + reserved)
Christoph Lameter81819f02007-05-06 14:49:36 -07002701 continue;
2702
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002703 rem = (slab_size - reserved) % size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002704
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002705 if (rem <= slab_size / fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07002706 break;
2707
2708 }
Christoph Lameter672bba32007-05-09 02:32:39 -07002709
Christoph Lameter81819f02007-05-06 14:49:36 -07002710 return order;
2711}
2712
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002713static inline int calculate_order(int size, int reserved)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002714{
2715 int order;
2716 int min_objects;
2717 int fraction;
Zhang Yanmine8120ff2009-02-12 18:00:17 +02002718 int max_objects;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002719
2720 /*
2721 * Attempt to find best configuration for a slab. This
2722 * works by first attempting to generate a layout with
2723 * the best configuration and backing off gradually.
2724 *
2725 * First we reduce the acceptable waste in a slab. Then
2726 * we reduce the minimum objects required in a slab.
2727 */
2728 min_objects = slub_min_objects;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03002729 if (!min_objects)
2730 min_objects = 4 * (fls(nr_cpu_ids) + 1);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002731 max_objects = order_objects(slub_max_order, size, reserved);
Zhang Yanmine8120ff2009-02-12 18:00:17 +02002732 min_objects = min(min_objects, max_objects);
2733
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002734 while (min_objects > 1) {
Christoph Lameterc124f5b2008-04-14 19:13:29 +03002735 fraction = 16;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002736 while (fraction >= 4) {
2737 order = slab_order(size, min_objects,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002738 slub_max_order, fraction, reserved);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002739 if (order <= slub_max_order)
2740 return order;
2741 fraction /= 2;
2742 }
Amerigo Wang5086c389c2009-08-19 21:44:13 +03002743 min_objects--;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002744 }
2745
2746 /*
2747 * We were unable to place multiple objects in a slab. Now
2748 * let's see if we can place a single object there.
2749 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002750 order = slab_order(size, 1, slub_max_order, 1, reserved);
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002751 if (order <= slub_max_order)
2752 return order;
2753
2754 /*
2755 * Doh this slab cannot be placed using slub_max_order.
2756 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08002757 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
David Rientjes818cf592009-04-23 09:58:22 +03002758 if (order < MAX_ORDER)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002759 return order;
2760 return -ENOSYS;
2761}
2762
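/*
 * Worked example for slab_order()/calculate_order() (illustrative numbers
 * only, assuming 4 KiB pages, reserved == 0, slub_max_order ==
 * PAGE_ALLOC_COSTLY_ORDER == 3 and a computed min_objects of 16): for a
 * 700 byte object the search starts at order fls(16 * 700 - 1) -
 * PAGE_SHIFT = 14 - 12 = 2. An order 2 slab provides 16384 bytes, fits
 * 23 objects and wastes 284 bytes, which is below the 1/16 fraction
 * (16384 / 16 = 1024), so order 2 is accepted.
 */
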
Christoph Lameter81819f02007-05-06 14:49:36 -07002763/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002764 * Figure out what the alignment of the objects will be.
Christoph Lameter81819f02007-05-06 14:49:36 -07002765 */
2766static unsigned long calculate_alignment(unsigned long flags,
2767 unsigned long align, unsigned long size)
2768{
2769 /*
Christoph Lameter6446faa2008-02-15 23:45:26 -08002770 * If the user wants hardware cache aligned objects then follow that
2771 * suggestion if the object is sufficiently large.
Christoph Lameter81819f02007-05-06 14:49:36 -07002772 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08002773 * The hardware cache alignment cannot override the specified
2774 * alignment though. If the specified alignment is greater, use it.
Christoph Lameter81819f02007-05-06 14:49:36 -07002775 */
Nick Pigginb6210382008-03-05 14:05:56 -08002776 if (flags & SLAB_HWCACHE_ALIGN) {
2777 unsigned long ralign = cache_line_size();
2778 while (size <= ralign / 2)
2779 ralign /= 2;
2780 align = max(align, ralign);
2781 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002782
2783 if (align < ARCH_SLAB_MINALIGN)
Nick Pigginb6210382008-03-05 14:05:56 -08002784 align = ARCH_SLAB_MINALIGN;
Christoph Lameter81819f02007-05-06 14:49:36 -07002785
2786 return ALIGN(align, sizeof(void *));
2787}
2788
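/*
 * Example of the SLAB_HWCACHE_ALIGN halving above (a sketch assuming a
 * 64 byte cache line): for a 24 byte object ralign starts at 64, is
 * halved once because 24 <= 32, and stops because 24 > 16, so the object
 * ends up 32 byte aligned rather than padded out to a full cache line.
 */
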
Pekka Enberg5595cff2008-08-05 09:28:47 +03002789static void
Joonsoo Kim40534972012-05-11 00:50:47 +09002790init_kmem_cache_node(struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07002791{
2792 n->nr_partial = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07002793 spin_lock_init(&n->list_lock);
2794 INIT_LIST_HEAD(&n->partial);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002795#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter0f389ec2008-04-14 18:53:02 +03002796 atomic_long_set(&n->nr_slabs, 0);
Salman Qazi02b71b72008-09-11 12:25:41 -07002797 atomic_long_set(&n->total_objects, 0);
Christoph Lameter643b1132007-05-06 14:49:42 -07002798 INIT_LIST_HEAD(&n->full);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002799#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002800}
2801
Christoph Lameter55136592010-08-20 12:37:13 -05002802static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002803{
Christoph Lameter6c182dc2010-08-20 12:37:14 -05002804 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2805 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002806
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002807 /*
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04002808 * Must align to double word boundary for the double cmpxchg
2809 * instructions to work; see __pcpu_double_call_return_bool().
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002810 */
Chris Metcalfd4d84fe2011-06-02 10:19:41 -04002811 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2812 2 * sizeof(void *));
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002813
Christoph Lameter8a5ec0b2011-02-25 11:38:54 -06002814 if (!s->cpu_slab)
2815 return 0;
2816
2817 init_kmem_cache_cpus(s);
2818
2819 return 1;
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002820}
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002821
Christoph Lameter51df1142010-08-20 12:37:15 -05002822static struct kmem_cache *kmem_cache_node;
2823
Christoph Lameter81819f02007-05-06 14:49:36 -07002824/*
2825 * No kmalloc_node yet so do it by hand. We know that this is the first
2826 * slab on the node for this slabcache. There are no concurrent accesses
2827 * possible.
2828 *
2829 * Note that this function only works on the kmalloc_node_cache
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002830 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2831 * memory on a fresh node that has no slab structures yet.
Christoph Lameter81819f02007-05-06 14:49:36 -07002832 */
Christoph Lameter55136592010-08-20 12:37:13 -05002833static void early_kmem_cache_node_alloc(int node)
Christoph Lameter81819f02007-05-06 14:49:36 -07002834{
2835 struct page *page;
2836 struct kmem_cache_node *n;
2837
Christoph Lameter51df1142010-08-20 12:37:15 -05002838 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
Christoph Lameter81819f02007-05-06 14:49:36 -07002839
Christoph Lameter51df1142010-08-20 12:37:15 -05002840 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002841
2842 BUG_ON(!page);
Christoph Lametera2f92ee2007-08-22 14:01:57 -07002843 if (page_to_nid(page) != node) {
2844 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2845 "node %d\n", node);
2846 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2847 "in order to be able to continue\n");
2848 }
2849
Christoph Lameter81819f02007-05-06 14:49:36 -07002850 n = page->freelist;
2851 BUG_ON(!n);
Christoph Lameter51df1142010-08-20 12:37:15 -05002852 page->freelist = get_freepointer(kmem_cache_node, n);
Christoph Lametere6e82ea2011-08-09 16:12:24 -05002853 page->inuse = 1;
Christoph Lameter8cb0a502011-06-01 12:25:46 -05002854 page->frozen = 0;
Christoph Lameter51df1142010-08-20 12:37:15 -05002855 kmem_cache_node->node[node] = n;
Christoph Lameter8ab13722007-07-17 04:03:32 -07002856#ifdef CONFIG_SLUB_DEBUG
Christoph Lameterf7cb1932010-09-29 07:15:01 -05002857 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
Christoph Lameter51df1142010-08-20 12:37:15 -05002858 init_tracking(kmem_cache_node, n);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002859#endif
Joonsoo Kim40534972012-05-11 00:50:47 +09002860 init_kmem_cache_node(n);
Christoph Lameter51df1142010-08-20 12:37:15 -05002861 inc_slabs_node(kmem_cache_node, node, page->objects);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002862
Shaohua Li136333d2011-08-24 08:57:52 +08002863 add_partial(n, page, DEACTIVATE_TO_HEAD);
Christoph Lameter81819f02007-05-06 14:49:36 -07002864}
2865
2866static void free_kmem_cache_nodes(struct kmem_cache *s)
2867{
2868 int node;
2869
Christoph Lameterf64dc582007-10-16 01:25:33 -07002870 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002871 struct kmem_cache_node *n = s->node[node];
Christoph Lameter51df1142010-08-20 12:37:15 -05002872
Alexander Duyck73367bd2010-05-21 14:41:35 -07002873 if (n)
Christoph Lameter51df1142010-08-20 12:37:15 -05002874 kmem_cache_free(kmem_cache_node, n);
2875
Christoph Lameter81819f02007-05-06 14:49:36 -07002876 s->node[node] = NULL;
2877 }
2878}
2879
Christoph Lameter55136592010-08-20 12:37:13 -05002880static int init_kmem_cache_nodes(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002881{
2882 int node;
Christoph Lameter81819f02007-05-06 14:49:36 -07002883
Christoph Lameterf64dc582007-10-16 01:25:33 -07002884 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002885 struct kmem_cache_node *n;
2886
Alexander Duyck73367bd2010-05-21 14:41:35 -07002887 if (slab_state == DOWN) {
Christoph Lameter55136592010-08-20 12:37:13 -05002888 early_kmem_cache_node_alloc(node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07002889 continue;
Christoph Lameter81819f02007-05-06 14:49:36 -07002890 }
Christoph Lameter51df1142010-08-20 12:37:15 -05002891 n = kmem_cache_alloc_node(kmem_cache_node,
Christoph Lameter55136592010-08-20 12:37:13 -05002892 GFP_KERNEL, node);
Alexander Duyck73367bd2010-05-21 14:41:35 -07002893
2894 if (!n) {
2895 free_kmem_cache_nodes(s);
2896 return 0;
2897 }
2898
Christoph Lameter81819f02007-05-06 14:49:36 -07002899 s->node[node] = n;
Joonsoo Kim40534972012-05-11 00:50:47 +09002900 init_kmem_cache_node(n);
Christoph Lameter81819f02007-05-06 14:49:36 -07002901 }
2902 return 1;
2903}
Christoph Lameter81819f02007-05-06 14:49:36 -07002904
David Rientjesc0bdb232009-02-25 09:16:35 +02002905static void set_min_partial(struct kmem_cache *s, unsigned long min)
David Rientjes3b89d7d2009-02-22 17:40:07 -08002906{
2907 if (min < MIN_PARTIAL)
2908 min = MIN_PARTIAL;
2909 else if (min > MAX_PARTIAL)
2910 min = MAX_PARTIAL;
2911 s->min_partial = min;
2912}
2913
Christoph Lameter81819f02007-05-06 14:49:36 -07002914/*
2915 * calculate_sizes() determines the order and the distribution of data within
2916 * a slab object.
2917 */
Christoph Lameter06b285d2008-04-14 19:11:41 +03002918static int calculate_sizes(struct kmem_cache *s, int forced_order)
Christoph Lameter81819f02007-05-06 14:49:36 -07002919{
2920 unsigned long flags = s->flags;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002921 unsigned long size = s->object_size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002922 unsigned long align = s->align;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002923 int order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002924
2925 /*
Christoph Lameterd8b42bf2008-02-15 23:45:25 -08002926 * Round up object size to the next word boundary. We can only
2927 * place the free pointer at word boundaries and this determines
2928 * the possible location of the free pointer.
2929 */
2930 size = ALIGN(size, sizeof(void *));
2931
2932#ifdef CONFIG_SLUB_DEBUG
2933 /*
Christoph Lameter81819f02007-05-06 14:49:36 -07002934 * Determine if we can poison the object itself. If the user of
2935 * the slab may touch the object after free or before allocation
2936 * then we should never poison the object itself.
2937 */
2938 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
Christoph Lameterc59def92007-05-16 22:10:50 -07002939 !s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07002940 s->flags |= __OBJECT_POISON;
2941 else
2942 s->flags &= ~__OBJECT_POISON;
2943
Christoph Lameter81819f02007-05-06 14:49:36 -07002944
2945 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002946 * If we are Redzoning then check if there is some space between the
Christoph Lameter81819f02007-05-06 14:49:36 -07002947 * end of the object and the free pointer. If not then add an
Christoph Lameter672bba32007-05-09 02:32:39 -07002948 * additional word to have some bytes to store Redzone information.
Christoph Lameter81819f02007-05-06 14:49:36 -07002949 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002950 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
Christoph Lameter81819f02007-05-06 14:49:36 -07002951 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002952#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002953
2954 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002955 * With that we have determined the number of bytes in actual use
2956 * by the object. This is the potential offset to the free pointer.
Christoph Lameter81819f02007-05-06 14:49:36 -07002957 */
2958 s->inuse = size;
2959
2960 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
Christoph Lameterc59def92007-05-16 22:10:50 -07002961 s->ctor)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002962 /*
2963 * Relocate free pointer after the object if it is not
2964 * permitted to overwrite the first word of the object on
2965 * kmem_cache_free.
2966 *
2967 * This is the case if we do RCU, have a constructor or
2968 * destructor or are poisoning the objects.
2969 */
2970 s->offset = size;
2971 size += sizeof(void *);
2972 }
2973
Christoph Lameterc12b3c62007-05-23 13:57:31 -07002974#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07002975 if (flags & SLAB_STORE_USER)
2976 /*
2977 * Need to store information about allocs and frees after
2978 * the object.
2979 */
2980 size += 2 * sizeof(struct track);
2981
Christoph Lameterbe7b3fb2007-05-09 02:32:36 -07002982 if (flags & SLAB_RED_ZONE)
Christoph Lameter81819f02007-05-06 14:49:36 -07002983 /*
2984 * Add some empty padding so that we can catch
2985 * overwrites from earlier objects rather than let
2986 * tracking information or the free pointer be
Frederik Schwarzer0211a9c2008-12-29 22:14:56 +01002987 * corrupted if a user writes before the start
Christoph Lameter81819f02007-05-06 14:49:36 -07002988 * of the object.
2989 */
2990 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002991#endif
Christoph Lameter672bba32007-05-09 02:32:39 -07002992
Christoph Lameter81819f02007-05-06 14:49:36 -07002993 /*
2994 * Determine the alignment based on various parameters that the
Christoph Lameter65c02d42007-05-09 02:32:35 -07002995 * user specified and the dynamic determination of cache line size
2996 * on bootup.
Christoph Lameter81819f02007-05-06 14:49:36 -07002997 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002998 align = calculate_alignment(flags, align, s->object_size);
Zhang, Yanmindcb0ce12009-07-30 11:28:11 +08002999 s->align = align;
Christoph Lameter81819f02007-05-06 14:49:36 -07003000
3001 /*
3002 * SLUB stores one object immediately after another beginning from
3003 * offset 0. In order to align the objects we have to simply size
3004 * each object to conform to the alignment.
3005 */
3006 size = ALIGN(size, align);
3007 s->size = size;
Christoph Lameter06b285d2008-04-14 19:11:41 +03003008 if (forced_order >= 0)
3009 order = forced_order;
3010 else
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08003011 order = calculate_order(size, s->reserved);
Christoph Lameter81819f02007-05-06 14:49:36 -07003012
Christoph Lameter834f3d12008-04-14 19:11:31 +03003013 if (order < 0)
Christoph Lameter81819f02007-05-06 14:49:36 -07003014 return 0;
3015
Christoph Lameterb7a49f02008-02-14 14:21:32 -08003016 s->allocflags = 0;
Christoph Lameter834f3d12008-04-14 19:11:31 +03003017 if (order)
Christoph Lameterb7a49f02008-02-14 14:21:32 -08003018 s->allocflags |= __GFP_COMP;
3019
3020 if (s->flags & SLAB_CACHE_DMA)
3021 s->allocflags |= SLUB_DMA;
3022
3023 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3024 s->allocflags |= __GFP_RECLAIMABLE;
3025
Christoph Lameter81819f02007-05-06 14:49:36 -07003026 /*
3027 * Determine the number of objects per slab
3028 */
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08003029 s->oo = oo_make(order, size, s->reserved);
3030 s->min = oo_make(get_order(size), size, s->reserved);
Christoph Lameter205ab992008-04-14 19:11:40 +03003031 if (oo_objects(s->oo) > oo_objects(s->max))
3032 s->max = s->oo;
Christoph Lameter81819f02007-05-06 14:49:36 -07003033
Christoph Lameter834f3d12008-04-14 19:11:31 +03003034 return !!oo_objects(s->oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07003035
3036}
3037
Christoph Lameter55136592010-08-20 12:37:13 -05003038static int kmem_cache_open(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07003039 const char *name, size_t size,
3040 size_t align, unsigned long flags,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003041 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003042{
3043 memset(s, 0, kmem_size);
3044 s->name = name;
3045 s->ctor = ctor;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003046 s->object_size = size;
Christoph Lameter81819f02007-05-06 14:49:36 -07003047 s->align = align;
Christoph Lameterba0268a2007-09-11 15:24:11 -07003048 s->flags = kmem_cache_flags(size, flags, name, ctor);
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08003049 s->reserved = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07003050
Lai Jiangshanda9a6382011-03-10 15:22:00 +08003051 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3052 s->reserved = sizeof(struct rcu_head);
Christoph Lameter81819f02007-05-06 14:49:36 -07003053
Christoph Lameter06b285d2008-04-14 19:11:41 +03003054 if (!calculate_sizes(s, -1))
Christoph Lameter81819f02007-05-06 14:49:36 -07003055 goto error;
David Rientjes3de47212009-07-27 18:30:35 -07003056 if (disable_higher_order_debug) {
3057 /*
3058 * Disable debugging flags that store metadata if the min slab
3059 * order increased.
3060 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003061 if (get_order(s->size) > get_order(s->object_size)) {
David Rientjes3de47212009-07-27 18:30:35 -07003062 s->flags &= ~DEBUG_METADATA_FLAGS;
3063 s->offset = 0;
3064 if (!calculate_sizes(s, -1))
3065 goto error;
3066 }
3067 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003068
Heiko Carstens25654092012-01-12 17:17:33 -08003069#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3070 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
Christoph Lameterb789ef52011-06-01 12:25:49 -05003071 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3072 /* Enable fast mode */
3073 s->flags |= __CMPXCHG_DOUBLE;
3074#endif
3075
David Rientjes3b89d7d2009-02-22 17:40:07 -08003076 /*
3077 * The larger the object size is, the more pages we want on the partial
3078 * list to avoid pounding the page allocator excessively.
3079 */
Christoph Lameter49e22582011-08-09 16:12:27 -05003080 set_min_partial(s, ilog2(s->size) / 2);
3081
3082 /*
3083 * cpu_partial determines the maximum number of objects kept in the
3084 * per cpu partial lists of a processor.
3085 *
3086 * Per cpu partial lists mainly contain slabs that just have one
3087 * object freed. If they are used for allocation then they can be
3088 * filled up again with minimal effort. The slab will never hit the
3089 * per node partial lists and therefore no locking will be required.
3090 *
3091 * This setting also determines
3092 *
3093 * A) The number of objects from per cpu partial slabs dumped to the
3094 * per node list when we reach the limit.
Alex Shi9f264902011-09-01 11:32:18 +08003095 * B) The number of objects in cpu partial slabs to extract from the
Christoph Lameter49e22582011-08-09 16:12:27 -05003096 * per node list when we run out of per cpu objects. We only fetch 50%
3097 * to keep some capacity around for frees.
3098 */
Christoph Lameter8f1e33d2011-11-23 09:24:27 -06003099 if (kmem_cache_debug(s))
3100 s->cpu_partial = 0;
3101 else if (s->size >= PAGE_SIZE)
Christoph Lameter49e22582011-08-09 16:12:27 -05003102 s->cpu_partial = 2;
3103 else if (s->size >= 1024)
3104 s->cpu_partial = 6;
3105 else if (s->size >= 256)
3106 s->cpu_partial = 13;
3107 else
3108 s->cpu_partial = 30;
3109
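	/*
	 * For example (assuming 4 KiB pages and no debug flags): a cache
	 * with s->size of 128 gets cpu_partial = 30, 512 gets 13, 2048
	 * gets 6, and anything of at least a page gets 2, so small objects
	 * keep more spare per cpu capacity than large ones.
	 */
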
Christoph Lameter81819f02007-05-06 14:49:36 -07003110 s->refcount = 1;
3111#ifdef CONFIG_NUMA
Christoph Lametere2cb96b2008-08-19 08:51:22 -05003112 s->remote_node_defrag_ratio = 1000;
Christoph Lameter81819f02007-05-06 14:49:36 -07003113#endif
Christoph Lameter55136592010-08-20 12:37:13 -05003114 if (!init_kmem_cache_nodes(s))
Christoph Lameterdfb4f092007-10-16 01:26:05 -07003115 goto error;
Christoph Lameter81819f02007-05-06 14:49:36 -07003116
Christoph Lameter55136592010-08-20 12:37:13 -05003117 if (alloc_kmem_cache_cpus(s))
Christoph Lameter81819f02007-05-06 14:49:36 -07003118 return 1;
Christoph Lameterff120592009-12-18 16:26:22 -06003119
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003120 free_kmem_cache_nodes(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003121error:
3122 if (flags & SLAB_PANIC)
3123 panic("Cannot create slab %s size=%lu realsize=%u "
3124 "order=%u offset=%u flags=%lx\n",
Christoph Lameter834f3d12008-04-14 19:11:31 +03003125 s->name, (unsigned long)size, s->size, oo_order(s->oo),
Christoph Lameter81819f02007-05-06 14:49:36 -07003126 s->offset, flags);
3127 return 0;
3128}
Christoph Lameter81819f02007-05-06 14:49:36 -07003129
3130/*
Christoph Lameter81819f02007-05-06 14:49:36 -07003131 * Determine the size of a slab object
3132 */
3133unsigned int kmem_cache_size(struct kmem_cache *s)
3134{
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003135 return s->object_size;
Christoph Lameter81819f02007-05-06 14:49:36 -07003136}
3137EXPORT_SYMBOL(kmem_cache_size);
3138
Christoph Lameter33b12c32008-04-25 12:22:43 -07003139static void list_slab_objects(struct kmem_cache *s, struct page *page,
3140 const char *text)
Christoph Lameter81819f02007-05-06 14:49:36 -07003141{
Christoph Lameter33b12c32008-04-25 12:22:43 -07003142#ifdef CONFIG_SLUB_DEBUG
3143 void *addr = page_address(page);
3144 void *p;
Namhyung Kima5dd5c12010-09-29 21:02:13 +09003145 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3146 sizeof(long), GFP_ATOMIC);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003147 if (!map)
3148 return;
Christoph Lameter33b12c32008-04-25 12:22:43 -07003149 slab_err(s, page, "%s", text);
3150 slab_lock(page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003151
Christoph Lameter5f80b132011-04-15 14:48:13 -05003152 get_map(s, page, map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003153 for_each_object(p, s, addr, page->objects) {
3154
3155 if (!test_bit(slab_index(p, s, addr), map)) {
3156 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3157 p, p - addr);
3158 print_tracking(s, p);
3159 }
3160 }
3161 slab_unlock(page);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01003162 kfree(map);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003163#endif
3164}
3165
Christoph Lameter81819f02007-05-06 14:49:36 -07003166/*
Christoph Lameter599870b2008-04-23 12:36:52 -07003167 * Attempt to free all partial slabs on a node.
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003168 * This is called from kmem_cache_close(). We must be the last thread
3169 * using the cache and therefore we do not need to lock anymore.
Christoph Lameter81819f02007-05-06 14:49:36 -07003170 */
Christoph Lameter599870b2008-04-23 12:36:52 -07003171static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07003172{
Christoph Lameter81819f02007-05-06 14:49:36 -07003173 struct page *page, *h;
3174
Christoph Lameter33b12c32008-04-25 12:22:43 -07003175 list_for_each_entry_safe(page, h, &n->partial, lru) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003176 if (!page->inuse) {
Christoph Lameter5cc6eee2011-06-01 12:25:50 -05003177 remove_partial(n, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07003178 discard_slab(s, page);
Christoph Lameter33b12c32008-04-25 12:22:43 -07003179 } else {
3180 list_slab_objects(s, page,
3181 "Objects remaining on kmem_cache_close()");
Christoph Lameter599870b2008-04-23 12:36:52 -07003182 }
Christoph Lameter33b12c32008-04-25 12:22:43 -07003183 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003184}
3185
3186/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003187 * Release all resources used by a slab cache.
Christoph Lameter81819f02007-05-06 14:49:36 -07003188 */
Christoph Lameter0c710012007-07-17 04:03:24 -07003189static inline int kmem_cache_close(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07003190{
3191 int node;
3192
3193 flush_all(s);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003194 free_percpu(s->cpu_slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07003195 /* Attempt to free all objects */
Christoph Lameterf64dc582007-10-16 01:25:33 -07003196 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003197 struct kmem_cache_node *n = get_node(s, node);
3198
Christoph Lameter599870b2008-04-23 12:36:52 -07003199 free_partial(s, n);
3200 if (n->nr_partial || slabs_node(s, node))
Christoph Lameter81819f02007-05-06 14:49:36 -07003201 return 1;
3202 }
3203 free_kmem_cache_nodes(s);
3204 return 0;
3205}
3206
3207/*
3208 * Close a cache and release the kmem_cache structure
3209 * (must be used for caches created using kmem_cache_create)
3210 */
3211void kmem_cache_destroy(struct kmem_cache *s)
3212{
Christoph Lameter18004c52012-07-06 15:25:12 -05003213 mutex_lock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07003214 s->refcount--;
3215 if (!s->refcount) {
3216 list_del(&s->list);
Christoph Lameter18004c52012-07-06 15:25:12 -05003217 mutex_unlock(&slab_mutex);
Pekka Enbergd629d812008-04-23 22:31:08 +03003218 if (kmem_cache_close(s)) {
3219 printk(KERN_ERR "SLUB %s: %s called for cache that "
3220 "still has objects.\n", s->name, __func__);
3221 dump_stack();
3222 }
Eric Dumazetd76b1592009-09-03 22:38:59 +03003223 if (s->flags & SLAB_DESTROY_BY_RCU)
3224 rcu_barrier();
Christoph Lameter81819f02007-05-06 14:49:36 -07003225 sysfs_slab_remove(s);
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003226 } else
Christoph Lameter18004c52012-07-06 15:25:12 -05003227 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07003228}
3229EXPORT_SYMBOL(kmem_cache_destroy);
3230
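/*
 * Typical cache lifetime as seen from a caller (an illustrative snippet
 * only; struct my_obj and the cache name are placeholders):
 *
 *	struct kmem_cache *c = kmem_cache_create("my_objs",
 *			sizeof(struct my_obj), 0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct my_obj *p = kmem_cache_alloc(c, GFP_KERNEL);
 *	...
 *	kmem_cache_free(c, p);
 *	kmem_cache_destroy(c);
 */
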
3231/********************************************************************
3232 * Kmalloc subsystem
3233 *******************************************************************/
3234
Christoph Lameter51df1142010-08-20 12:37:15 -05003235struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
Christoph Lameter81819f02007-05-06 14:49:36 -07003236EXPORT_SYMBOL(kmalloc_caches);
3237
Christoph Lameter51df1142010-08-20 12:37:15 -05003238static struct kmem_cache *kmem_cache;
3239
Christoph Lameter55136592010-08-20 12:37:13 -05003240#ifdef CONFIG_ZONE_DMA
Christoph Lameter51df1142010-08-20 12:37:15 -05003241static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
Christoph Lameter55136592010-08-20 12:37:13 -05003242#endif
3243
Christoph Lameter81819f02007-05-06 14:49:36 -07003244static int __init setup_slub_min_order(char *str)
3245{
Pekka Enberg06428782008-01-07 23:20:27 -08003246 get_option(&str, &slub_min_order);
Christoph Lameter81819f02007-05-06 14:49:36 -07003247
3248 return 1;
3249}
3250
3251__setup("slub_min_order=", setup_slub_min_order);
3252
3253static int __init setup_slub_max_order(char *str)
3254{
Pekka Enberg06428782008-01-07 23:20:27 -08003255 get_option(&str, &slub_max_order);
David Rientjes818cf592009-04-23 09:58:22 +03003256 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07003257
3258 return 1;
3259}
3260
3261__setup("slub_max_order=", setup_slub_max_order);
3262
3263static int __init setup_slub_min_objects(char *str)
3264{
Pekka Enberg06428782008-01-07 23:20:27 -08003265 get_option(&str, &slub_min_objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07003266
3267 return 1;
3268}
3269
3270__setup("slub_min_objects=", setup_slub_min_objects);
3271
3272static int __init setup_slub_nomerge(char *str)
3273{
3274 slub_nomerge = 1;
3275 return 1;
3276}
3277
3278__setup("slub_nomerge", setup_slub_nomerge);
3279
Christoph Lameter51df1142010-08-20 12:37:15 -05003280static struct kmem_cache *__init create_kmalloc_cache(const char *name,
3281 int size, unsigned int flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07003282{
Christoph Lameter51df1142010-08-20 12:37:15 -05003283 struct kmem_cache *s;
3284
3285 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3286
Pekka Enberg83b519e2009-06-10 19:40:04 +03003287 /*
3288 * This function is called with IRQs disabled during early-boot on
Christoph Lameter18004c52012-07-06 15:25:12 -05003289 * single CPU so there's no need to take slab_mutex here.
Pekka Enberg83b519e2009-06-10 19:40:04 +03003290 */
Christoph Lameter55136592010-08-20 12:37:13 -05003291 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
Christoph Lameter319d1e22008-04-14 19:11:41 +03003292 flags, NULL))
Christoph Lameter81819f02007-05-06 14:49:36 -07003293 goto panic;
3294
3295 list_add(&s->list, &slab_caches);
Christoph Lameter51df1142010-08-20 12:37:15 -05003296 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003297
3298panic:
3299 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
Christoph Lameter51df1142010-08-20 12:37:15 -05003300 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07003301}
3302
Christoph Lameterf1b26332007-07-17 04:03:26 -07003303/*
3304 * Conversion table from small slab sizes (in units of 8 bytes) to the index in the
3305 * kmalloc array. This is necessary for slabs < 192 since we have non power
3306 * of two cache sizes there. The size of larger slabs can be determined using
3307 * fls.
3308 */
3309static s8 size_index[24] = {
3310 3, /* 8 */
3311 4, /* 16 */
3312 5, /* 24 */
3313 5, /* 32 */
3314 6, /* 40 */
3315 6, /* 48 */
3316 6, /* 56 */
3317 6, /* 64 */
3318 1, /* 72 */
3319 1, /* 80 */
3320 1, /* 88 */
3321 1, /* 96 */
3322 7, /* 104 */
3323 7, /* 112 */
3324 7, /* 120 */
3325 7, /* 128 */
3326 2, /* 136 */
3327 2, /* 144 */
3328 2, /* 152 */
3329 2, /* 160 */
3330 2, /* 168 */
3331 2, /* 176 */
3332 2, /* 184 */
3333 2 /* 192 */
3334};
3335
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003336static inline int size_index_elem(size_t bytes)
3337{
3338 return (bytes - 1) / 8;
3339}
3340
Christoph Lameter81819f02007-05-06 14:49:36 -07003341static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3342{
Christoph Lameterf1b26332007-07-17 04:03:26 -07003343 int index;
Christoph Lameter81819f02007-05-06 14:49:36 -07003344
Christoph Lameterf1b26332007-07-17 04:03:26 -07003345 if (size <= 192) {
3346 if (!size)
3347 return ZERO_SIZE_PTR;
Christoph Lameter81819f02007-05-06 14:49:36 -07003348
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003349 index = size_index[size_index_elem(size)];
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003350 } else
Christoph Lameterf1b26332007-07-17 04:03:26 -07003351 index = fls(size - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07003352
3353#ifdef CONFIG_ZONE_DMA
Christoph Lameterf1b26332007-07-17 04:03:26 -07003354 if (unlikely((flags & SLUB_DMA)))
Christoph Lameter51df1142010-08-20 12:37:15 -05003355 return kmalloc_dma_caches[index];
Christoph Lameterf1b26332007-07-17 04:03:26 -07003356
Christoph Lameter81819f02007-05-06 14:49:36 -07003357#endif
Christoph Lameter51df1142010-08-20 12:37:15 -05003358 return kmalloc_caches[index];
Christoph Lameter81819f02007-05-06 14:49:36 -07003359}
3360
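/*
 * Index lookup example (illustrative, assuming the default kmalloc caches
 * where kmalloc_caches[n] serves 2^n byte objects for n >= 3):
 * kmalloc(100, GFP_KERNEL) takes the table path, size_index[(100 - 1) / 8]
 * == size_index[12] == 7, i.e. the 128 byte cache; kmalloc(200,
 * GFP_KERNEL) is above 192 bytes, so index = fls(199) = 8 and the 256
 * byte cache is used.
 */
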
3361void *__kmalloc(size_t size, gfp_t flags)
3362{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003363 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003364 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003365
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003366 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02003367 return kmalloc_large(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003368
3369 s = get_slab(size, flags);
3370
3371 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003372 return s;
3373
Christoph Lameter2154a332010-07-09 14:07:10 -05003374 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003375
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003376 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003377
3378 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003379}
3380EXPORT_SYMBOL(__kmalloc);
3381
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09003382#ifdef CONFIG_NUMA
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003383static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3384{
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003385 struct page *page;
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003386 void *ptr = NULL;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003387
Vegard Nossumb1eeab62008-11-25 16:55:53 +01003388 flags |= __GFP_COMP | __GFP_NOTRACK;
3389 page = alloc_pages_node(node, flags, get_order(size));
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003390 if (page)
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003391 ptr = page_address(page);
3392
3393 kmemleak_alloc(ptr, size, 1, flags);
3394 return ptr;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003395}
3396
Christoph Lameter81819f02007-05-06 14:49:36 -07003397void *__kmalloc_node(size_t size, gfp_t flags, int node)
3398{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003399 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003400 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003401
Ingo Molnar057685c2009-02-20 12:15:30 +01003402 if (unlikely(size > SLUB_MAX_SIZE)) {
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003403 ret = kmalloc_large_node(size, flags, node);
3404
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003405 trace_kmalloc_node(_RET_IP_, ret,
3406 size, PAGE_SIZE << get_order(size),
3407 flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003408
3409 return ret;
3410 }
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003411
3412 s = get_slab(size, flags);
3413
3414 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003415 return s;
3416
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003417 ret = slab_alloc(s, flags, node, _RET_IP_);
3418
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003419 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03003420
3421 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003422}
3423EXPORT_SYMBOL(__kmalloc_node);
3424#endif
3425
3426size_t ksize(const void *object)
3427{
Christoph Lameter272c1d22007-06-08 13:46:49 -07003428 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07003429
Christoph Lameteref8b4522007-10-16 01:24:46 -07003430 if (unlikely(object == ZERO_SIZE_PTR))
Christoph Lameter272c1d22007-06-08 13:46:49 -07003431 return 0;
3432
Vegard Nossum294a80a2007-12-04 23:45:30 -08003433 page = virt_to_head_page(object);
Vegard Nossum294a80a2007-12-04 23:45:30 -08003434
Pekka Enberg76994412008-05-22 19:22:25 +03003435 if (unlikely(!PageSlab(page))) {
3436 WARN_ON(!PageCompound(page));
Vegard Nossum294a80a2007-12-04 23:45:30 -08003437 return PAGE_SIZE << compound_order(page);
Pekka Enberg76994412008-05-22 19:22:25 +03003438 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003439
Eric Dumazetb3d41882011-02-14 18:35:22 +01003440 return slab_ksize(page->slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07003441}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02003442EXPORT_SYMBOL(ksize);
Christoph Lameter81819f02007-05-06 14:49:36 -07003443
Ben Greeard18a90d2011-07-07 11:36:37 -07003444#ifdef CONFIG_SLUB_DEBUG
3445bool verify_mem_not_deleted(const void *x)
3446{
3447 struct page *page;
3448 void *object = (void *)x;
3449 unsigned long flags;
3450 bool rv;
3451
3452 if (unlikely(ZERO_OR_NULL_PTR(x)))
3453 return false;
3454
3455 local_irq_save(flags);
3456
3457 page = virt_to_head_page(x);
3458 if (unlikely(!PageSlab(page))) {
3459 /* maybe it was from stack? */
3460 rv = true;
3461 goto out_unlock;
3462 }
3463
3464 slab_lock(page);
3465 if (on_freelist(page->slab, page, object)) {
3466 object_err(page->slab, page, object, "Object is on free-list");
3467 rv = false;
3468 } else {
3469 rv = true;
3470 }
3471 slab_unlock(page);
3472
3473out_unlock:
3474 local_irq_restore(flags);
3475 return rv;
3476}
3477EXPORT_SYMBOL(verify_mem_not_deleted);
3478#endif
3479
Christoph Lameter81819f02007-05-06 14:49:36 -07003480void kfree(const void *x)
3481{
Christoph Lameter81819f02007-05-06 14:49:36 -07003482 struct page *page;
Christoph Lameter5bb983b2008-02-07 17:47:41 -08003483 void *object = (void *)x;
Christoph Lameter81819f02007-05-06 14:49:36 -07003484
Pekka Enberg2121db72009-03-25 11:05:57 +02003485 trace_kfree(_RET_IP_, x);
3486
Satyam Sharma2408c552007-10-16 01:24:44 -07003487 if (unlikely(ZERO_OR_NULL_PTR(x)))
Christoph Lameter81819f02007-05-06 14:49:36 -07003488 return;
3489
Christoph Lameterb49af682007-05-06 14:49:41 -07003490 page = virt_to_head_page(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003491 if (unlikely(!PageSlab(page))) {
Christoph Lameter09375022008-05-28 10:32:22 -07003492 BUG_ON(!PageCompound(page));
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01003493 kmemleak_free(x);
Glauber Costad9b7f222012-08-03 22:51:37 +04003494 __free_pages(page, compound_order(page));
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003495 return;
3496 }
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003497 slab_free(page->slab, page, object, _RET_IP_);
Christoph Lameter81819f02007-05-06 14:49:36 -07003498}
3499EXPORT_SYMBOL(kfree);
3500
Christoph Lameter2086d262007-05-06 14:49:46 -07003501/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003502 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3503 * the remaining slabs by the number of items in use. The slabs with the
3504 * most items in use come first. New allocations will then fill those up
3505 * and thus they can be removed from the partial lists.
3506 *
3507 * The slabs with the least items are placed last. This results in them
3508 * being allocated from last, increasing the chance that the last objects
3509 * are freed in them.
Christoph Lameter2086d262007-05-06 14:49:46 -07003510 */
3511int kmem_cache_shrink(struct kmem_cache *s)
3512{
3513 int node;
3514 int i;
3515 struct kmem_cache_node *n;
3516 struct page *page;
3517 struct page *t;
Christoph Lameter205ab992008-04-14 19:11:40 +03003518 int objects = oo_objects(s->max);
Christoph Lameter2086d262007-05-06 14:49:46 -07003519 struct list_head *slabs_by_inuse =
Christoph Lameter834f3d12008-04-14 19:11:31 +03003520 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
Christoph Lameter2086d262007-05-06 14:49:46 -07003521 unsigned long flags;
3522
3523 if (!slabs_by_inuse)
3524 return -ENOMEM;
3525
3526 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07003527 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter2086d262007-05-06 14:49:46 -07003528 n = get_node(s, node);
3529
3530 if (!n->nr_partial)
3531 continue;
3532
Christoph Lameter834f3d12008-04-14 19:11:31 +03003533 for (i = 0; i < objects; i++)
Christoph Lameter2086d262007-05-06 14:49:46 -07003534 INIT_LIST_HEAD(slabs_by_inuse + i);
3535
3536 spin_lock_irqsave(&n->list_lock, flags);
3537
3538 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003539 * Build lists indexed by the items in use in each slab.
Christoph Lameter2086d262007-05-06 14:49:46 -07003540 *
Christoph Lameter672bba32007-05-09 02:32:39 -07003541 * Note that concurrent frees may occur while we hold the
3542 * list_lock. page->inuse here is the upper limit.
Christoph Lameter2086d262007-05-06 14:49:46 -07003543 */
3544 list_for_each_entry_safe(page, t, &n->partial, lru) {
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003545 list_move(&page->lru, slabs_by_inuse + page->inuse);
3546 if (!page->inuse)
3547 n->nr_partial--;
Christoph Lameter2086d262007-05-06 14:49:46 -07003548 }
3549
Christoph Lameter2086d262007-05-06 14:49:46 -07003550 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003551 * Rebuild the partial list with the slabs filled up most
3552 * first and the least used slabs at the end.
Christoph Lameter2086d262007-05-06 14:49:46 -07003553 */
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003554 for (i = objects - 1; i > 0; i--)
Christoph Lameter2086d262007-05-06 14:49:46 -07003555 list_splice(slabs_by_inuse + i, n->partial.prev);
3556
Christoph Lameter2086d262007-05-06 14:49:46 -07003557 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter69cb8e62011-08-09 16:12:22 -05003558
3559 /* Release empty slabs */
3560 list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3561 discard_slab(s, page);
Christoph Lameter2086d262007-05-06 14:49:46 -07003562 }
3563
3564 kfree(slabs_by_inuse);
3565 return 0;
3566}
3567EXPORT_SYMBOL(kmem_cache_shrink);
3568
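/*
 * Shrink example (illustrative only): if oo_objects(s->max) is 32 and a
 * node's partial list holds slabs with inuse of 30, 3 and 0, the pass
 * above files them into slabs_by_inuse[30], [3] and [0]. Splicing the
 * buckets from high to low rebuilds the list with the inuse=30 slab
 * first and the inuse=3 slab after it, while the empty slab stays in
 * bucket 0 and is handed to discard_slab() afterwards.
 */
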
Pekka Enberg92a5bbc2010-10-06 16:58:16 +03003569#if defined(CONFIG_MEMORY_HOTPLUG)
Yasunori Gotob9049e22007-10-21 16:41:37 -07003570static int slab_mem_going_offline_callback(void *arg)
3571{
3572 struct kmem_cache *s;
3573
Christoph Lameter18004c52012-07-06 15:25:12 -05003574 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003575 list_for_each_entry(s, &slab_caches, list)
3576 kmem_cache_shrink(s);
Christoph Lameter18004c52012-07-06 15:25:12 -05003577 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003578
3579 return 0;
3580}
3581
3582static void slab_mem_offline_callback(void *arg)
3583{
3584 struct kmem_cache_node *n;
3585 struct kmem_cache *s;
3586 struct memory_notify *marg = arg;
3587 int offline_node;
3588
3589 offline_node = marg->status_change_nid;
3590
3591 /*
3592 * If the node still has available memory, we still need its
3593 * kmem_cache_node, so there is nothing to free here.
3594 */
3595 if (offline_node < 0)
3596 return;
3597
Christoph Lameter18004c52012-07-06 15:25:12 -05003598 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003599 list_for_each_entry(s, &slab_caches, list) {
3600 n = get_node(s, offline_node);
3601 if (n) {
3602 /*
3603 * if n->nr_slabs > 0, slabs still exist on the node
3604 * that is going down. We were unable to free them,
Adam Buchbinderc9404c92009-12-18 15:40:42 -05003605 * and offline_pages() function shouldn't call this
Yasunori Gotob9049e22007-10-21 16:41:37 -07003606 * callback. So, we must fail.
3607 */
Christoph Lameter0f389ec2008-04-14 18:53:02 +03003608 BUG_ON(slabs_node(s, offline_node));
Yasunori Gotob9049e22007-10-21 16:41:37 -07003609
3610 s->node[offline_node] = NULL;
Christoph Lameter8de66a02010-08-25 14:51:14 -05003611 kmem_cache_free(kmem_cache_node, n);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003612 }
3613 }
Christoph Lameter18004c52012-07-06 15:25:12 -05003614 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003615}
3616
3617static int slab_mem_going_online_callback(void *arg)
3618{
3619 struct kmem_cache_node *n;
3620 struct kmem_cache *s;
3621 struct memory_notify *marg = arg;
3622 int nid = marg->status_change_nid;
3623 int ret = 0;
3624
3625 /*
3626 * If the node's memory is already available, then kmem_cache_node is
3627 * already created. Nothing to do.
3628 */
3629 if (nid < 0)
3630 return 0;
3631
3632 /*
Christoph Lameter0121c6192008-04-29 16:11:12 -07003633 * We are bringing a node online. No memory is available yet. We must
Yasunori Gotob9049e22007-10-21 16:41:37 -07003634 * allocate a kmem_cache_node structure in order to bring the node
3635 * online.
3636 */
Christoph Lameter18004c52012-07-06 15:25:12 -05003637 mutex_lock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003638 list_for_each_entry(s, &slab_caches, list) {
3639 /*
3640	 * XXX: kmem_cache_alloc_node will fall back to other nodes
3641	 * since memory is not yet available from the node that
3642	 * is being brought up.
3643 */
Christoph Lameter8de66a02010-08-25 14:51:14 -05003644 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003645 if (!n) {
3646 ret = -ENOMEM;
3647 goto out;
3648 }
Joonsoo Kim40534972012-05-11 00:50:47 +09003649 init_kmem_cache_node(n);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003650 s->node[nid] = n;
3651 }
3652out:
Christoph Lameter18004c52012-07-06 15:25:12 -05003653 mutex_unlock(&slab_mutex);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003654 return ret;
3655}
3656
3657static int slab_memory_callback(struct notifier_block *self,
3658 unsigned long action, void *arg)
3659{
3660 int ret = 0;
3661
3662 switch (action) {
3663 case MEM_GOING_ONLINE:
3664 ret = slab_mem_going_online_callback(arg);
3665 break;
3666 case MEM_GOING_OFFLINE:
3667 ret = slab_mem_going_offline_callback(arg);
3668 break;
3669 case MEM_OFFLINE:
3670 case MEM_CANCEL_ONLINE:
3671 slab_mem_offline_callback(arg);
3672 break;
3673 case MEM_ONLINE:
3674 case MEM_CANCEL_OFFLINE:
3675 break;
3676 }
KAMEZAWA Hiroyukidc19f9d2008-12-01 13:13:48 -08003677 if (ret)
3678 ret = notifier_from_errno(ret);
3679 else
3680 ret = NOTIFY_OK;
Yasunori Gotob9049e22007-10-21 16:41:37 -07003681 return ret;
3682}
3683
3684#endif /* CONFIG_MEMORY_HOTPLUG */
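
/*
 * The notifier above hooks into the memory hotplug state machine:
 * MEM_GOING_ONLINE allocates the kmem_cache_node structures for the new
 * node before any of its memory is usable, MEM_GOING_OFFLINE shrinks
 * every cache so that empty slabs are returned, and MEM_OFFLINE /
 * MEM_CANCEL_ONLINE free the kmem_cache_node structures again once the
 * node no longer holds any slabs.
 */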
3685
Christoph Lameter81819f02007-05-06 14:49:36 -07003686/********************************************************************
3687 * Basic setup of slabs
3688 *******************************************************************/
3689
Christoph Lameter51df1142010-08-20 12:37:15 -05003690/*
3691 * Used for early kmem_cache structures that were allocated using
3692 * the page allocator
3693 */
3694
3695static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3696{
3697 int node;
3698
3699 list_add(&s->list, &slab_caches);
3700 s->refcount = -1;
3701
3702 for_each_node_state(node, N_NORMAL_MEMORY) {
3703 struct kmem_cache_node *n = get_node(s, node);
3704 struct page *p;
3705
3706 if (n) {
3707 list_for_each_entry(p, &n->partial, lru)
3708 p->slab = s;
3709
Li Zefan607bf322011-04-12 15:22:26 +08003710#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter51df1142010-08-20 12:37:15 -05003711 list_for_each_entry(p, &n->full, lru)
3712 p->slab = s;
3713#endif
3714 }
3715 }
3716}
3717
Christoph Lameter81819f02007-05-06 14:49:36 -07003718void __init kmem_cache_init(void)
3719{
3720 int i;
Christoph Lameter4b356be2007-06-16 10:16:13 -07003721 int caches = 0;
Christoph Lameter51df1142010-08-20 12:37:15 -05003722 struct kmem_cache *temp_kmem_cache;
3723 int order;
Christoph Lameter51df1142010-08-20 12:37:15 -05003724 struct kmem_cache *temp_kmem_cache_node;
3725 unsigned long kmalloc_size;
3726
Stanislaw Gruszkafc8d8622012-01-10 15:07:32 -08003727 if (debug_guardpage_minorder())
3728 slub_max_order = 0;
3729
Christoph Lameter51df1142010-08-20 12:37:15 -05003730 kmem_size = offsetof(struct kmem_cache, node) +
3731 nr_node_ids * sizeof(struct kmem_cache_node *);
3732
3733 /* Allocate two kmem_caches from the page allocator */
3734 kmalloc_size = ALIGN(kmem_size, cache_line_size());
3735 order = get_order(2 * kmalloc_size);
3736 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3737
Christoph Lameter81819f02007-05-06 14:49:36 -07003738 /*
3739 * Must first have the slab cache available for the allocations of the
Christoph Lameter672bba32007-05-09 02:32:39 -07003740 * struct kmem_cache_node's. There is special bootstrap code in
Christoph Lameter81819f02007-05-06 14:49:36 -07003741 * kmem_cache_open for slab_state == DOWN.
3742 */
Christoph Lameter51df1142010-08-20 12:37:15 -05003743 kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3744
3745 kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3746 sizeof(struct kmem_cache_node),
3747 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003748
Nadia Derbey0c40ba42008-04-29 01:00:41 -07003749 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
Christoph Lameter81819f02007-05-06 14:49:36 -07003750
3751 /* Able to allocate the per node structures */
3752 slab_state = PARTIAL;
3753
Christoph Lameter51df1142010-08-20 12:37:15 -05003754 temp_kmem_cache = kmem_cache;
3755 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3756 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3757 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3758 memcpy(kmem_cache, temp_kmem_cache, kmem_size);
Christoph Lameter81819f02007-05-06 14:49:36 -07003759
Christoph Lameter51df1142010-08-20 12:37:15 -05003760 /*
3761 * Allocate kmem_cache_node properly from the kmem_cache slab.
3762 * kmem_cache_node is separately allocated so no need to
3763 * update any list pointers.
3764 */
3765 temp_kmem_cache_node = kmem_cache_node;
Christoph Lameter81819f02007-05-06 14:49:36 -07003766
Christoph Lameter51df1142010-08-20 12:37:15 -05003767 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3768 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3769
3770 kmem_cache_bootstrap_fixup(kmem_cache_node);
3771
3772 caches++;
Christoph Lameter51df1142010-08-20 12:37:15 -05003773 kmem_cache_bootstrap_fixup(kmem_cache);
3774 caches++;
3775 /* Free temporary boot structure */
3776 free_pages((unsigned long)temp_kmem_cache, order);
3777
3778 /* Now we can use the kmem_cache to allocate kmalloc slabs */
Christoph Lameterf1b26332007-07-17 04:03:26 -07003779
3780 /*
3781 * Patch up the size_index table if we have strange large alignment
3782	 * requirements for the kmalloc array. This seems to be the case only
Christoph Lameter6446faa2008-02-15 23:45:26 -08003783	 * for MIPS. The standard arches will not generate any code here.
Christoph Lameterf1b26332007-07-17 04:03:26 -07003784 *
3785 * Largest permitted alignment is 256 bytes due to the way we
3786 * handle the index determination for the smaller caches.
3787 *
3788 * Make sure that nothing crazy happens if someone starts tinkering
3789 * around with ARCH_KMALLOC_MINALIGN
3790 */
3791 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3792 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3793
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003794 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3795 int elem = size_index_elem(i);
3796 if (elem >= ARRAY_SIZE(size_index))
3797 break;
3798 size_index[elem] = KMALLOC_SHIFT_LOW;
3799 }
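
	/*
	 * For example, with KMALLOC_MIN_SIZE == 32 the loop above redirects
	 * the size_index entries for 8, 16 and 24 byte requests to
	 * KMALLOC_SHIFT_LOW, i.e. to the smallest kmalloc cache that is
	 * actually created (32 bytes in that configuration).
	 */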
Christoph Lameterf1b26332007-07-17 04:03:26 -07003800
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003801 if (KMALLOC_MIN_SIZE == 64) {
3802 /*
3803 * The 96 byte size cache is not used if the alignment
3804 * is 64 byte.
3805 */
3806 for (i = 64 + 8; i <= 96; i += 8)
3807 size_index[size_index_elem(i)] = 7;
3808 } else if (KMALLOC_MIN_SIZE == 128) {
Christoph Lameter41d54d32008-07-03 09:14:26 -05003809 /*
3810 * The 192 byte sized cache is not used if the alignment
3811 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3812 * instead.
3813 */
3814 for (i = 128 + 8; i <= 192; i += 8)
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003815 size_index[size_index_elem(i)] = 8;
Christoph Lameter41d54d32008-07-03 09:14:26 -05003816 }
3817
Christoph Lameter51df1142010-08-20 12:37:15 -05003818	/* Caches that are not power-of-two sized */
3819 if (KMALLOC_MIN_SIZE <= 32) {
3820 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3821 caches++;
3822 }
3823
3824 if (KMALLOC_MIN_SIZE <= 64) {
3825 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3826 caches++;
3827 }
3828
3829 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3830 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3831 caches++;
3832 }
3833
Christoph Lameter81819f02007-05-06 14:49:36 -07003834 slab_state = UP;
3835
3836 /* Provide the correct kmalloc names now that the caches are up */
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003837 if (KMALLOC_MIN_SIZE <= 32) {
3838 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3839 BUG_ON(!kmalloc_caches[1]->name);
3840 }
3841
3842 if (KMALLOC_MIN_SIZE <= 64) {
3843 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3844 BUG_ON(!kmalloc_caches[2]->name);
3845 }
3846
Christoph Lameterd7278bd2010-07-09 14:07:12 -05003847 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3848 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3849
3850 BUG_ON(!s);
Christoph Lameter51df1142010-08-20 12:37:15 -05003851 kmalloc_caches[i]->name = s;
Christoph Lameterd7278bd2010-07-09 14:07:12 -05003852 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003853
3854#ifdef CONFIG_SMP
3855 register_cpu_notifier(&slab_notifier);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003856#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07003857
Christoph Lameter55136592010-08-20 12:37:13 -05003858#ifdef CONFIG_ZONE_DMA
Christoph Lameter51df1142010-08-20 12:37:15 -05003859 for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3860 struct kmem_cache *s = kmalloc_caches[i];
Christoph Lameter55136592010-08-20 12:37:13 -05003861
Christoph Lameter51df1142010-08-20 12:37:15 -05003862 if (s && s->size) {
Christoph Lameter55136592010-08-20 12:37:13 -05003863 char *name = kasprintf(GFP_NOWAIT,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003864 "dma-kmalloc-%d", s->object_size);
Christoph Lameter55136592010-08-20 12:37:13 -05003865
3866 BUG_ON(!name);
Christoph Lameter51df1142010-08-20 12:37:15 -05003867 kmalloc_dma_caches[i] = create_kmalloc_cache(name,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003868 s->object_size, SLAB_CACHE_DMA);
Christoph Lameter55136592010-08-20 12:37:13 -05003869 }
3870 }
3871#endif
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003872 printk(KERN_INFO
3873 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
Christoph Lameter4b356be2007-06-16 10:16:13 -07003874 " CPUs=%d, Nodes=%d\n",
3875 caches, cache_line_size(),
Christoph Lameter81819f02007-05-06 14:49:36 -07003876 slub_min_order, slub_max_order, slub_min_objects,
3877 nr_cpu_ids, nr_node_ids);
3878}
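
/*
 * Bootstrap order used above: kmem_cache and kmem_cache_node first live in
 * pages taken directly from the page allocator, which is enough to create
 * the per node structures.  Both are then re-allocated from the now
 * functional kmem_cache slab, kmem_cache_bootstrap_fixup() re-points the
 * already existing slab pages at the new structures, and the temporary
 * page allocator copies are freed.
 */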
3879
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003880void __init kmem_cache_init_late(void)
3881{
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003882}
3883
Christoph Lameter81819f02007-05-06 14:49:36 -07003884/*
3885 * Find a mergeable slab cache
3886 */
3887static int slab_unmergeable(struct kmem_cache *s)
3888{
3889 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3890 return 1;
3891
Christoph Lameterc59def92007-05-16 22:10:50 -07003892 if (s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003893 return 1;
3894
Christoph Lameter8ffa6872007-05-31 00:40:51 -07003895 /*
3896 * We may have set a slab to be unmergeable during bootstrap.
3897 */
3898 if (s->refcount < 0)
3899 return 1;
3900
Christoph Lameter81819f02007-05-06 14:49:36 -07003901 return 0;
3902}
3903
3904static struct kmem_cache *find_mergeable(size_t size,
Christoph Lameterba0268a2007-09-11 15:24:11 -07003905 size_t align, unsigned long flags, const char *name,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003906 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003907{
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003908 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003909
3910 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3911 return NULL;
3912
Christoph Lameterc59def92007-05-16 22:10:50 -07003913 if (ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003914 return NULL;
3915
3916 size = ALIGN(size, sizeof(void *));
3917 align = calculate_alignment(flags, align, size);
3918 size = ALIGN(size, align);
Christoph Lameterba0268a2007-09-11 15:24:11 -07003919 flags = kmem_cache_flags(size, flags, name, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07003920
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07003921 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003922 if (slab_unmergeable(s))
3923 continue;
3924
3925 if (size > s->size)
3926 continue;
3927
Christoph Lameterba0268a2007-09-11 15:24:11 -07003928 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
Christoph Lameter81819f02007-05-06 14:49:36 -07003929 continue;
3930 /*
3931 * Check if alignment is compatible.
3932 * Courtesy of Adrian Drzewiecki
3933 */
Pekka Enberg06428782008-01-07 23:20:27 -08003934 if ((s->size & ~(align - 1)) != s->size)
Christoph Lameter81819f02007-05-06 14:49:36 -07003935 continue;
3936
3937 if (s->size - size >= sizeof(void *))
3938 continue;
3939
3940 return s;
3941 }
3942 return NULL;
3943}
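
/*
 * A cache returned by find_mergeable() is at least as large as the request,
 * agrees on the SLUB_MERGE_SAME flags, preserves the requested alignment
 * and wastes less than one word per object; anything else falls through to
 * the creation of a brand new cache in __kmem_cache_create() below.
 */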
3944
Christoph Lameter039363f2012-07-06 15:25:10 -05003945struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003946 size_t align, unsigned long flags, void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003947{
3948 struct kmem_cache *s;
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003949 char *n;
Christoph Lameter81819f02007-05-06 14:49:36 -07003950
Christoph Lameterba0268a2007-09-11 15:24:11 -07003951 s = find_mergeable(size, align, flags, name, ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07003952 if (s) {
3953 s->refcount++;
3954 /*
3955 * Adjust the object sizes so that we clear
3956 * the complete object on kzalloc.
3957 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003958 s->object_size = max(s->object_size, (int)size);
Christoph Lameter81819f02007-05-06 14:49:36 -07003959 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
Christoph Lameter6446faa2008-02-15 23:45:26 -08003960
David Rientjes7b8f3b62008-12-17 22:09:46 -08003961 if (sysfs_slab_alias(s, name)) {
David Rientjes7b8f3b62008-12-17 22:09:46 -08003962 s->refcount--;
Christoph Lameter20cea962012-07-06 15:25:13 -05003963 return NULL;
David Rientjes7b8f3b62008-12-17 22:09:46 -08003964 }
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003965 return s;
3966 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08003967
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003968 n = kstrdup(name, GFP_KERNEL);
3969 if (!n)
Christoph Lameter20cea962012-07-06 15:25:13 -05003970 return NULL;
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003971
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003972 s = kmalloc(kmem_size, GFP_KERNEL);
3973 if (s) {
Pekka Enberg84c1cf62010-09-14 23:21:12 +03003974 if (kmem_cache_open(s, n,
Christoph Lameterc59def92007-05-16 22:10:50 -07003975 size, align, flags, ctor)) {
Christoph Lameter20cea962012-07-06 15:25:13 -05003976 int r;
3977
Christoph Lameter81819f02007-05-06 14:49:36 -07003978 list_add(&s->list, &slab_caches);
Christoph Lameter18004c52012-07-06 15:25:12 -05003979 mutex_unlock(&slab_mutex);
Christoph Lameter20cea962012-07-06 15:25:13 -05003980 r = sysfs_slab_add(s);
3981 mutex_lock(&slab_mutex);
3982
3983 if (!r)
3984 return s;
3985
3986 list_del(&s->list);
3987 kmem_cache_close(s);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003988 }
3989 kfree(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003990 }
Joonsoo Kim601d39d2012-05-11 00:32:59 +09003991 kfree(n);
Christoph Lameter20cea962012-07-06 15:25:13 -05003992 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07003993}
Christoph Lameter81819f02007-05-06 14:49:36 -07003994
Christoph Lameter81819f02007-05-06 14:49:36 -07003995#ifdef CONFIG_SMP
Christoph Lameter27390bc2007-06-01 00:47:09 -07003996/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003997 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3998 * necessary.
Christoph Lameter81819f02007-05-06 14:49:36 -07003999 */
4000static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
4001 unsigned long action, void *hcpu)
4002{
4003 long cpu = (long)hcpu;
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07004004 struct kmem_cache *s;
4005 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07004006
4007 switch (action) {
4008 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004009 case CPU_UP_CANCELED_FROZEN:
Christoph Lameter81819f02007-05-06 14:49:36 -07004010 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004011 case CPU_DEAD_FROZEN:
Christoph Lameter18004c52012-07-06 15:25:12 -05004012 mutex_lock(&slab_mutex);
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07004013 list_for_each_entry(s, &slab_caches, list) {
4014 local_irq_save(flags);
4015 __flush_cpu_slab(s, cpu);
4016 local_irq_restore(flags);
4017 }
Christoph Lameter18004c52012-07-06 15:25:12 -05004018 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07004019 break;
4020 default:
4021 break;
4022 }
4023 return NOTIFY_OK;
4024}
4025
Pekka Enberg06428782008-01-07 23:20:27 -08004026static struct notifier_block __cpuinitdata slab_notifier = {
Ingo Molnar3adbefe2008-02-05 17:57:39 -08004027 .notifier_call = slab_cpuup_callback
Pekka Enberg06428782008-01-07 23:20:27 -08004028};
Christoph Lameter81819f02007-05-06 14:49:36 -07004029
4030#endif
4031
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004032void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07004033{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004034 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004035 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004036
Christoph Lameterffadd4d2009-02-17 12:05:07 -05004037 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02004038 return kmalloc_large(size, gfpflags);
4039
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004040 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07004041
Satyam Sharma2408c552007-10-16 01:24:44 -07004042 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004043 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004044
Christoph Lameter2154a332010-07-09 14:07:10 -05004045 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004046
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004047 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004048 trace_kmalloc(caller, ret, size, s->size, gfpflags);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004049
4050 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004051}
4052
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004053#ifdef CONFIG_NUMA
Christoph Lameter81819f02007-05-06 14:49:36 -07004054void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004055 int node, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07004056{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004057 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004058 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004059
Xiaotian Fengd3e14aa2010-04-08 17:26:44 +08004060 if (unlikely(size > SLUB_MAX_SIZE)) {
4061 ret = kmalloc_large_node(size, gfpflags, node);
4062
4063 trace_kmalloc_node(caller, ret,
4064 size, PAGE_SIZE << get_order(size),
4065 gfpflags, node);
4066
4067 return ret;
4068 }
Pekka Enbergeada35e2008-02-11 22:47:46 +02004069
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07004070 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07004071
Satyam Sharma2408c552007-10-16 01:24:44 -07004072 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07004073 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07004074
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004075 ret = slab_alloc(s, gfpflags, node, caller);
4076
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004077 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02004078 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03004079
4080 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07004081}
Namhyung Kim5d1f57e2010-09-29 21:02:15 +09004082#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004083
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004084#ifdef CONFIG_SYSFS
Christoph Lameter205ab992008-04-14 19:11:40 +03004085static int count_inuse(struct page *page)
4086{
4087 return page->inuse;
4088}
4089
4090static int count_total(struct page *page)
4091{
4092 return page->objects;
4093}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004094#endif
Christoph Lameter205ab992008-04-14 19:11:40 +03004095
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004096#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter434e2452007-07-17 04:03:30 -07004097static int validate_slab(struct kmem_cache *s, struct page *page,
4098 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004099{
4100 void *p;
Christoph Lametera973e9d2008-03-01 13:40:44 -08004101 void *addr = page_address(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004102
4103 if (!check_slab(s, page) ||
4104 !on_freelist(s, page, NULL))
4105 return 0;
4106
4107 /* Now we know that a valid freelist exists */
Christoph Lameter39b26462008-04-14 19:11:30 +03004108 bitmap_zero(map, page->objects);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004109
Christoph Lameter5f80b132011-04-15 14:48:13 -05004110 get_map(s, page, map);
4111 for_each_object(p, s, addr, page->objects) {
4112 if (test_bit(slab_index(p, s, addr), map))
4113 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4114 return 0;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004115 }
4116
Christoph Lameter224a88b2008-04-14 19:11:31 +03004117 for_each_object(p, s, addr, page->objects)
Christoph Lameter7656c722007-05-09 02:32:40 -07004118 if (!test_bit(slab_index(p, s, addr), map))
Tero Roponen37d57442010-12-01 20:04:20 +02004119 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
Christoph Lameter53e15af2007-05-06 14:49:43 -07004120 return 0;
4121 return 1;
4122}
4123
Christoph Lameter434e2452007-07-17 04:03:30 -07004124static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4125 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004126{
Christoph Lameter881db7f2011-06-01 12:25:53 -05004127 slab_lock(page);
4128 validate_slab(s, page, map);
4129 slab_unlock(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004130}
4131
Christoph Lameter434e2452007-07-17 04:03:30 -07004132static int validate_slab_node(struct kmem_cache *s,
4133 struct kmem_cache_node *n, unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004134{
4135 unsigned long count = 0;
4136 struct page *page;
4137 unsigned long flags;
4138
4139 spin_lock_irqsave(&n->list_lock, flags);
4140
4141 list_for_each_entry(page, &n->partial, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07004142 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004143 count++;
4144 }
4145 if (count != n->nr_partial)
4146 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
4147 "counter=%ld\n", s->name, count, n->nr_partial);
4148
4149 if (!(s->flags & SLAB_STORE_USER))
4150 goto out;
4151
4152 list_for_each_entry(page, &n->full, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07004153 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004154 count++;
4155 }
4156 if (count != atomic_long_read(&n->nr_slabs))
4157 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
4158 "counter=%ld\n", s->name, count,
4159 atomic_long_read(&n->nr_slabs));
4160
4161out:
4162 spin_unlock_irqrestore(&n->list_lock, flags);
4163 return count;
4164}
4165
Christoph Lameter434e2452007-07-17 04:03:30 -07004166static long validate_slab_cache(struct kmem_cache *s)
Christoph Lameter53e15af2007-05-06 14:49:43 -07004167{
4168 int node;
4169 unsigned long count = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03004170 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
Christoph Lameter434e2452007-07-17 04:03:30 -07004171 sizeof(unsigned long), GFP_KERNEL);
4172
4173 if (!map)
4174 return -ENOMEM;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004175
4176 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07004177 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter53e15af2007-05-06 14:49:43 -07004178 struct kmem_cache_node *n = get_node(s, node);
4179
Christoph Lameter434e2452007-07-17 04:03:30 -07004180 count += validate_slab_node(s, n, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004181 }
Christoph Lameter434e2452007-07-17 04:03:30 -07004182 kfree(map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07004183 return count;
4184}
Christoph Lameter88a420e2007-05-06 14:49:45 -07004185/*
Christoph Lameter672bba32007-05-09 02:32:39 -07004186 * Generate lists of code addresses where slabcache objects are allocated
Christoph Lameter88a420e2007-05-06 14:49:45 -07004187 * and freed.
4188 */
4189
4190struct location {
4191 unsigned long count;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004192 unsigned long addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004193 long long sum_time;
4194 long min_time;
4195 long max_time;
4196 long min_pid;
4197 long max_pid;
Rusty Russell174596a2009-01-01 10:12:29 +10304198 DECLARE_BITMAP(cpus, NR_CPUS);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004199 nodemask_t nodes;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004200};
4201
4202struct loc_track {
4203 unsigned long max;
4204 unsigned long count;
4205 struct location *loc;
4206};
4207
4208static void free_loc_track(struct loc_track *t)
4209{
4210 if (t->max)
4211 free_pages((unsigned long)t->loc,
4212 get_order(sizeof(struct location) * t->max));
4213}
4214
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004215static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004216{
4217 struct location *l;
4218 int order;
4219
Christoph Lameter88a420e2007-05-06 14:49:45 -07004220 order = get_order(sizeof(struct location) * max);
4221
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004222 l = (void *)__get_free_pages(flags, order);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004223 if (!l)
4224 return 0;
4225
4226 if (t->count) {
4227 memcpy(l, t->loc, sizeof(struct location) * t->count);
4228 free_loc_track(t);
4229 }
4230 t->max = max;
4231 t->loc = l;
4232 return 1;
4233}
4234
4235static int add_location(struct loc_track *t, struct kmem_cache *s,
Christoph Lameter45edfa52007-05-09 02:32:45 -07004236 const struct track *track)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004237{
4238 long start, end, pos;
4239 struct location *l;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03004240 unsigned long caddr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004241 unsigned long age = jiffies - track->when;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004242
4243 start = -1;
4244 end = t->count;
4245
4246 for ( ; ; ) {
4247 pos = start + (end - start + 1) / 2;
4248
4249 /*
4250 * There is nothing at "end". If we end up there
4251	 * we need to insert the new element before "end".
4252 */
4253 if (pos == end)
4254 break;
4255
4256 caddr = t->loc[pos].addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004257 if (track->addr == caddr) {
4258
4259 l = &t->loc[pos];
4260 l->count++;
4261 if (track->when) {
4262 l->sum_time += age;
4263 if (age < l->min_time)
4264 l->min_time = age;
4265 if (age > l->max_time)
4266 l->max_time = age;
4267
4268 if (track->pid < l->min_pid)
4269 l->min_pid = track->pid;
4270 if (track->pid > l->max_pid)
4271 l->max_pid = track->pid;
4272
Rusty Russell174596a2009-01-01 10:12:29 +10304273 cpumask_set_cpu(track->cpu,
4274 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004275 }
4276 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004277 return 1;
4278 }
4279
Christoph Lameter45edfa52007-05-09 02:32:45 -07004280 if (track->addr < caddr)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004281 end = pos;
4282 else
4283 start = pos;
4284 }
4285
4286 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07004287 * Not found. Insert new tracking element.
Christoph Lameter88a420e2007-05-06 14:49:45 -07004288 */
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004289 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
Christoph Lameter88a420e2007-05-06 14:49:45 -07004290 return 0;
4291
4292 l = t->loc + pos;
4293 if (pos < t->count)
4294 memmove(l + 1, l,
4295 (t->count - pos) * sizeof(struct location));
4296 t->count++;
4297 l->count = 1;
Christoph Lameter45edfa52007-05-09 02:32:45 -07004298 l->addr = track->addr;
4299 l->sum_time = age;
4300 l->min_time = age;
4301 l->max_time = age;
4302 l->min_pid = track->pid;
4303 l->max_pid = track->pid;
Rusty Russell174596a2009-01-01 10:12:29 +10304304 cpumask_clear(to_cpumask(l->cpus));
4305 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004306 nodes_clear(l->nodes);
4307 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004308 return 1;
4309}
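
/*
 * add_location() keeps t->loc sorted by call site address: the loop above
 * is a binary search for the slot, a hit merges the age/pid/cpu statistics
 * into the existing entry, and a miss shifts the tail with memmove() and
 * fills in a fresh element, growing the array via alloc_loc_track() when
 * it is full.
 */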
4310
4311static void process_slab(struct loc_track *t, struct kmem_cache *s,
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004312 struct page *page, enum track_item alloc,
Namhyung Kima5dd5c12010-09-29 21:02:13 +09004313 unsigned long *map)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004314{
Christoph Lametera973e9d2008-03-01 13:40:44 -08004315 void *addr = page_address(page);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004316 void *p;
4317
Christoph Lameter39b26462008-04-14 19:11:30 +03004318 bitmap_zero(map, page->objects);
Christoph Lameter5f80b132011-04-15 14:48:13 -05004319 get_map(s, page, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004320
Christoph Lameter224a88b2008-04-14 19:11:31 +03004321 for_each_object(p, s, addr, page->objects)
Christoph Lameter45edfa52007-05-09 02:32:45 -07004322 if (!test_bit(slab_index(p, s, addr), map))
4323 add_location(t, s, get_track(s, p, alloc));
Christoph Lameter88a420e2007-05-06 14:49:45 -07004324}
4325
4326static int list_locations(struct kmem_cache *s, char *buf,
4327 enum track_item alloc)
4328{
Harvey Harrisone374d482008-01-31 15:20:50 -08004329 int len = 0;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004330 unsigned long i;
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004331 struct loc_track t = { 0, 0, NULL };
Christoph Lameter88a420e2007-05-06 14:49:45 -07004332 int node;
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004333 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4334 sizeof(unsigned long), GFP_KERNEL);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004335
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004336 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4337 GFP_TEMPORARY)) {
4338 kfree(map);
Christoph Lameter68dff6a2007-07-17 04:03:20 -07004339 return sprintf(buf, "Out of memory\n");
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004340 }
Christoph Lameter88a420e2007-05-06 14:49:45 -07004341 /* Push back cpu slabs */
4342 flush_all(s);
4343
Christoph Lameterf64dc582007-10-16 01:25:33 -07004344 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter88a420e2007-05-06 14:49:45 -07004345 struct kmem_cache_node *n = get_node(s, node);
4346 unsigned long flags;
4347 struct page *page;
4348
Christoph Lameter9e869432007-08-22 14:01:56 -07004349 if (!atomic_long_read(&n->nr_slabs))
Christoph Lameter88a420e2007-05-06 14:49:45 -07004350 continue;
4351
4352 spin_lock_irqsave(&n->list_lock, flags);
4353 list_for_each_entry(page, &n->partial, lru)
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004354 process_slab(&t, s, page, alloc, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004355 list_for_each_entry(page, &n->full, lru)
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004356 process_slab(&t, s, page, alloc, map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004357 spin_unlock_irqrestore(&n->list_lock, flags);
4358 }
4359
4360 for (i = 0; i < t.count; i++) {
Christoph Lameter45edfa52007-05-09 02:32:45 -07004361 struct location *l = &t.loc[i];
Christoph Lameter88a420e2007-05-06 14:49:45 -07004362
Hugh Dickins9c246242008-12-09 13:14:27 -08004363 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
Christoph Lameter88a420e2007-05-06 14:49:45 -07004364 break;
Harvey Harrisone374d482008-01-31 15:20:50 -08004365 len += sprintf(buf + len, "%7ld ", l->count);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004366
4367 if (l->addr)
Joe Perches62c70bc2011-01-13 15:45:52 -08004368 len += sprintf(buf + len, "%pS", (void *)l->addr);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004369 else
Harvey Harrisone374d482008-01-31 15:20:50 -08004370 len += sprintf(buf + len, "<not-available>");
Christoph Lameter45edfa52007-05-09 02:32:45 -07004371
4372 if (l->sum_time != l->min_time) {
Harvey Harrisone374d482008-01-31 15:20:50 -08004373 len += sprintf(buf + len, " age=%ld/%ld/%ld",
Roman Zippelf8bd2252008-05-01 04:34:31 -07004374 l->min_time,
4375 (long)div_u64(l->sum_time, l->count),
4376 l->max_time);
Christoph Lameter45edfa52007-05-09 02:32:45 -07004377 } else
Harvey Harrisone374d482008-01-31 15:20:50 -08004378 len += sprintf(buf + len, " age=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004379 l->min_time);
4380
4381 if (l->min_pid != l->max_pid)
Harvey Harrisone374d482008-01-31 15:20:50 -08004382 len += sprintf(buf + len, " pid=%ld-%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004383 l->min_pid, l->max_pid);
4384 else
Harvey Harrisone374d482008-01-31 15:20:50 -08004385 len += sprintf(buf + len, " pid=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07004386 l->min_pid);
4387
Rusty Russell174596a2009-01-01 10:12:29 +10304388 if (num_online_cpus() > 1 &&
4389 !cpumask_empty(to_cpumask(l->cpus)) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08004390 len < PAGE_SIZE - 60) {
4391 len += sprintf(buf + len, " cpus=");
4392 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Rusty Russell174596a2009-01-01 10:12:29 +10304393 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07004394 }
4395
Christoph Lameter62bc62a2009-06-16 15:32:15 -07004396 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08004397 len < PAGE_SIZE - 60) {
4398 len += sprintf(buf + len, " nodes=");
4399 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Christoph Lameter45edfa52007-05-09 02:32:45 -07004400 l->nodes);
4401 }
4402
Harvey Harrisone374d482008-01-31 15:20:50 -08004403 len += sprintf(buf + len, "\n");
Christoph Lameter88a420e2007-05-06 14:49:45 -07004404 }
4405
4406 free_loc_track(&t);
Eric Dumazetbbd7d572010-03-24 22:25:47 +01004407 kfree(map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07004408 if (!t.count)
Harvey Harrisone374d482008-01-31 15:20:50 -08004409 len += sprintf(buf, "No data\n");
4410 return len;
Christoph Lameter88a420e2007-05-06 14:49:45 -07004411}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004412#endif
Christoph Lameter88a420e2007-05-06 14:49:45 -07004413
Christoph Lametera5a84752010-10-05 13:57:27 -05004414#ifdef SLUB_RESILIENCY_TEST
4415static void resiliency_test(void)
4416{
4417 u8 *p;
4418
4419 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4420
4421 printk(KERN_ERR "SLUB resiliency testing\n");
4422 printk(KERN_ERR "-----------------------\n");
4423 printk(KERN_ERR "A. Corruption after allocation\n");
4424
4425 p = kzalloc(16, GFP_KERNEL);
4426 p[16] = 0x12;
4427 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4428 " 0x12->0x%p\n\n", p + 16);
4429
4430 validate_slab_cache(kmalloc_caches[4]);
4431
4432 /* Hmmm... The next two are dangerous */
4433 p = kzalloc(32, GFP_KERNEL);
4434 p[32 + sizeof(void *)] = 0x34;
4435 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4436			" 0x34 -> 0x%p\n", p);
4437 printk(KERN_ERR
4438 "If allocated object is overwritten then not detectable\n\n");
4439
4440 validate_slab_cache(kmalloc_caches[5]);
4441 p = kzalloc(64, GFP_KERNEL);
4442 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4443 *p = 0x56;
4444 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4445 p);
4446 printk(KERN_ERR
4447 "If allocated object is overwritten then not detectable\n\n");
4448 validate_slab_cache(kmalloc_caches[6]);
4449
4450 printk(KERN_ERR "\nB. Corruption after free\n");
4451 p = kzalloc(128, GFP_KERNEL);
4452 kfree(p);
4453 *p = 0x78;
4454 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4455 validate_slab_cache(kmalloc_caches[7]);
4456
4457 p = kzalloc(256, GFP_KERNEL);
4458 kfree(p);
4459 p[50] = 0x9a;
4460 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4461 p);
4462 validate_slab_cache(kmalloc_caches[8]);
4463
4464 p = kzalloc(512, GFP_KERNEL);
4465 kfree(p);
4466 p[512] = 0xab;
4467 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4468 validate_slab_cache(kmalloc_caches[9]);
4469}
4470#else
4471#ifdef CONFIG_SYSFS
4472static void resiliency_test(void) {};
4473#endif
4474#endif
4475
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004476#ifdef CONFIG_SYSFS
Christoph Lameter81819f02007-05-06 14:49:36 -07004477enum slab_stat_type {
Christoph Lameter205ab992008-04-14 19:11:40 +03004478 SL_ALL, /* All slabs */
4479 SL_PARTIAL, /* Only partially allocated slabs */
4480 SL_CPU, /* Only slabs used for cpu caches */
4481 SL_OBJECTS, /* Determine allocated objects not slabs */
4482 SL_TOTAL /* Determine object capacity not slabs */
Christoph Lameter81819f02007-05-06 14:49:36 -07004483};
4484
Christoph Lameter205ab992008-04-14 19:11:40 +03004485#define SO_ALL (1 << SL_ALL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004486#define SO_PARTIAL (1 << SL_PARTIAL)
4487#define SO_CPU (1 << SL_CPU)
4488#define SO_OBJECTS (1 << SL_OBJECTS)
Christoph Lameter205ab992008-04-14 19:11:40 +03004489#define SO_TOTAL (1 << SL_TOTAL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004490
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004491static ssize_t show_slab_objects(struct kmem_cache *s,
4492 char *buf, unsigned long flags)
Christoph Lameter81819f02007-05-06 14:49:36 -07004493{
4494 unsigned long total = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07004495 int node;
4496 int x;
4497 unsigned long *nodes;
4498 unsigned long *per_cpu;
4499
4500 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
Cyrill Gorcunov62e5c4b2008-03-02 23:28:24 +03004501 if (!nodes)
4502 return -ENOMEM;
Christoph Lameter81819f02007-05-06 14:49:36 -07004503 per_cpu = nodes + nr_node_ids;
4504
Christoph Lameter205ab992008-04-14 19:11:40 +03004505 if (flags & SO_CPU) {
4506 int cpu;
Christoph Lameter81819f02007-05-06 14:49:36 -07004507
Christoph Lameter205ab992008-04-14 19:11:40 +03004508 for_each_possible_cpu(cpu) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06004509 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameterec3ab082012-05-09 10:09:56 -05004510 int node;
Christoph Lameter49e22582011-08-09 16:12:27 -05004511 struct page *page;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004512
Eric Dumazetbc6697d2011-11-22 16:02:02 +01004513 page = ACCESS_ONCE(c->page);
Christoph Lameterec3ab082012-05-09 10:09:56 -05004514 if (!page)
4515 continue;
Christoph Lameter205ab992008-04-14 19:11:40 +03004516
Christoph Lameterec3ab082012-05-09 10:09:56 -05004517 node = page_to_nid(page);
4518 if (flags & SO_TOTAL)
4519 x = page->objects;
4520 else if (flags & SO_OBJECTS)
4521 x = page->inuse;
4522 else
4523 x = 1;
Christoph Lameter49e22582011-08-09 16:12:27 -05004524
Christoph Lameterec3ab082012-05-09 10:09:56 -05004525 total += x;
4526 nodes[node] += x;
4527
4528 page = ACCESS_ONCE(c->partial);
Christoph Lameter49e22582011-08-09 16:12:27 -05004529 if (page) {
4530 x = page->pobjects;
Eric Dumazetbc6697d2011-11-22 16:02:02 +01004531 total += x;
4532 nodes[node] += x;
Christoph Lameter49e22582011-08-09 16:12:27 -05004533 }
Christoph Lameterec3ab082012-05-09 10:09:56 -05004534
Eric Dumazetbc6697d2011-11-22 16:02:02 +01004535 per_cpu[node]++;
Christoph Lameter81819f02007-05-06 14:49:36 -07004536 }
4537 }
4538
Christoph Lameter04d94872011-01-10 10:15:15 -06004539 lock_memory_hotplug();
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004540#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter205ab992008-04-14 19:11:40 +03004541 if (flags & SO_ALL) {
4542 for_each_node_state(node, N_NORMAL_MEMORY) {
4543 struct kmem_cache_node *n = get_node(s, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07004544
Christoph Lameter205ab992008-04-14 19:11:40 +03004545 if (flags & SO_TOTAL)
4546 x = atomic_long_read(&n->total_objects);
4547 else if (flags & SO_OBJECTS)
4548 x = atomic_long_read(&n->total_objects) -
4549 count_partial(n, count_free);
4550
4551 else
4552 x = atomic_long_read(&n->nr_slabs);
4553 total += x;
4554 nodes[node] += x;
4555 }
4556
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004557 } else
4558#endif
4559 if (flags & SO_PARTIAL) {
Christoph Lameter205ab992008-04-14 19:11:40 +03004560 for_each_node_state(node, N_NORMAL_MEMORY) {
4561 struct kmem_cache_node *n = get_node(s, node);
4562
4563 if (flags & SO_TOTAL)
4564 x = count_partial(n, count_total);
4565 else if (flags & SO_OBJECTS)
4566 x = count_partial(n, count_inuse);
Christoph Lameter81819f02007-05-06 14:49:36 -07004567 else
4568 x = n->nr_partial;
4569 total += x;
4570 nodes[node] += x;
4571 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004572 }
Christoph Lameter81819f02007-05-06 14:49:36 -07004573 x = sprintf(buf, "%lu", total);
4574#ifdef CONFIG_NUMA
Christoph Lameterf64dc582007-10-16 01:25:33 -07004575 for_each_node_state(node, N_NORMAL_MEMORY)
Christoph Lameter81819f02007-05-06 14:49:36 -07004576 if (nodes[node])
4577 x += sprintf(buf + x, " N%d=%lu",
4578 node, nodes[node]);
4579#endif
Christoph Lameter04d94872011-01-10 10:15:15 -06004580 unlock_memory_hotplug();
Christoph Lameter81819f02007-05-06 14:49:36 -07004581 kfree(nodes);
4582 return x + sprintf(buf + x, "\n");
4583}
4584
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004585#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07004586static int any_slab_objects(struct kmem_cache *s)
4587{
4588 int node;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004589
4590 for_each_online_node(node) {
Christoph Lameter81819f02007-05-06 14:49:36 -07004591 struct kmem_cache_node *n = get_node(s, node);
4592
Christoph Lameterdfb4f092007-10-16 01:26:05 -07004593 if (!n)
4594 continue;
4595
Benjamin Herrenschmidt4ea33e22008-05-06 20:42:39 -07004596 if (atomic_long_read(&n->total_objects))
Christoph Lameter81819f02007-05-06 14:49:36 -07004597 return 1;
4598 }
4599 return 0;
4600}
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004601#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07004602
4603#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
Phil Carmody497888c2011-07-14 15:07:13 +03004604#define to_slab(n) container_of(n, struct kmem_cache, kobj)
Christoph Lameter81819f02007-05-06 14:49:36 -07004605
4606struct slab_attribute {
4607 struct attribute attr;
4608 ssize_t (*show)(struct kmem_cache *s, char *buf);
4609 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4610};
4611
4612#define SLAB_ATTR_RO(_name) \
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04004613 static struct slab_attribute _name##_attr = \
4614 __ATTR(_name, 0400, _name##_show, NULL)
Christoph Lameter81819f02007-05-06 14:49:36 -07004615
4616#define SLAB_ATTR(_name) \
4617 static struct slab_attribute _name##_attr = \
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04004618 __ATTR(_name, 0600, _name##_show, _name##_store)
Christoph Lameter81819f02007-05-06 14:49:36 -07004619
Christoph Lameter81819f02007-05-06 14:49:36 -07004620static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4621{
4622 return sprintf(buf, "%d\n", s->size);
4623}
4624SLAB_ATTR_RO(slab_size);
4625
4626static ssize_t align_show(struct kmem_cache *s, char *buf)
4627{
4628 return sprintf(buf, "%d\n", s->align);
4629}
4630SLAB_ATTR_RO(align);
4631
4632static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4633{
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05004634 return sprintf(buf, "%d\n", s->object_size);
Christoph Lameter81819f02007-05-06 14:49:36 -07004635}
4636SLAB_ATTR_RO(object_size);
4637
4638static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4639{
Christoph Lameter834f3d12008-04-14 19:11:31 +03004640 return sprintf(buf, "%d\n", oo_objects(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07004641}
4642SLAB_ATTR_RO(objs_per_slab);
4643
Christoph Lameter06b285d2008-04-14 19:11:41 +03004644static ssize_t order_store(struct kmem_cache *s,
4645 const char *buf, size_t length)
4646{
Christoph Lameter0121c6192008-04-29 16:11:12 -07004647 unsigned long order;
4648 int err;
4649
4650 err = strict_strtoul(buf, 10, &order);
4651 if (err)
4652 return err;
Christoph Lameter06b285d2008-04-14 19:11:41 +03004653
4654 if (order > slub_max_order || order < slub_min_order)
4655 return -EINVAL;
4656
4657 calculate_sizes(s, order);
4658 return length;
4659}
4660
Christoph Lameter81819f02007-05-06 14:49:36 -07004661static ssize_t order_show(struct kmem_cache *s, char *buf)
4662{
Christoph Lameter834f3d12008-04-14 19:11:31 +03004663 return sprintf(buf, "%d\n", oo_order(s->oo));
Christoph Lameter81819f02007-05-06 14:49:36 -07004664}
Christoph Lameter06b285d2008-04-14 19:11:41 +03004665SLAB_ATTR(order);
Christoph Lameter81819f02007-05-06 14:49:36 -07004666
David Rientjes73d342b2009-02-22 17:40:09 -08004667static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4668{
4669 return sprintf(buf, "%lu\n", s->min_partial);
4670}
4671
4672static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4673 size_t length)
4674{
4675 unsigned long min;
4676 int err;
4677
4678 err = strict_strtoul(buf, 10, &min);
4679 if (err)
4680 return err;
4681
David Rientjesc0bdb232009-02-25 09:16:35 +02004682 set_min_partial(s, min);
David Rientjes73d342b2009-02-22 17:40:09 -08004683 return length;
4684}
4685SLAB_ATTR(min_partial);
4686
Christoph Lameter49e22582011-08-09 16:12:27 -05004687static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4688{
4689 return sprintf(buf, "%u\n", s->cpu_partial);
4690}
4691
4692static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4693 size_t length)
4694{
4695 unsigned long objects;
4696 int err;
4697
4698 err = strict_strtoul(buf, 10, &objects);
4699 if (err)
4700 return err;
David Rientjes74ee4ef2012-01-09 13:19:45 -08004701 if (objects && kmem_cache_debug(s))
4702 return -EINVAL;
Christoph Lameter49e22582011-08-09 16:12:27 -05004703
4704 s->cpu_partial = objects;
4705 flush_all(s);
4706 return length;
4707}
4708SLAB_ATTR(cpu_partial);
4709
Christoph Lameter81819f02007-05-06 14:49:36 -07004710static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4711{
Joe Perches62c70bc2011-01-13 15:45:52 -08004712 if (!s->ctor)
4713 return 0;
4714 return sprintf(buf, "%pS\n", s->ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07004715}
4716SLAB_ATTR_RO(ctor);
4717
Christoph Lameter81819f02007-05-06 14:49:36 -07004718static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4719{
4720 return sprintf(buf, "%d\n", s->refcount - 1);
4721}
4722SLAB_ATTR_RO(aliases);
4723
Christoph Lameter81819f02007-05-06 14:49:36 -07004724static ssize_t partial_show(struct kmem_cache *s, char *buf)
4725{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08004726 return show_slab_objects(s, buf, SO_PARTIAL);
Christoph Lameter81819f02007-05-06 14:49:36 -07004727}
4728SLAB_ATTR_RO(partial);
4729
4730static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4731{
Christoph Lameterd9acf4b2008-02-15 15:22:21 -08004732 return show_slab_objects(s, buf, SO_CPU);
Christoph Lameter81819f02007-05-06 14:49:36 -07004733}
4734SLAB_ATTR_RO(cpu_slabs);
4735
4736static ssize_t objects_show(struct kmem_cache *s, char *buf)
4737{
Christoph Lameter205ab992008-04-14 19:11:40 +03004738 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
Christoph Lameter81819f02007-05-06 14:49:36 -07004739}
4740SLAB_ATTR_RO(objects);
4741
Christoph Lameter205ab992008-04-14 19:11:40 +03004742static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4743{
4744 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4745}
4746SLAB_ATTR_RO(objects_partial);
4747
Christoph Lameter49e22582011-08-09 16:12:27 -05004748static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4749{
4750 int objects = 0;
4751 int pages = 0;
4752 int cpu;
4753 int len;
4754
4755 for_each_online_cpu(cpu) {
4756 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4757
4758 if (page) {
4759 pages += page->pages;
4760 objects += page->pobjects;
4761 }
4762 }
4763
4764 len = sprintf(buf, "%d(%d)", objects, pages);
4765
4766#ifdef CONFIG_SMP
4767 for_each_online_cpu(cpu) {
4768		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4769
4770 if (page && len < PAGE_SIZE - 20)
4771 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4772 page->pobjects, page->pages);
4773 }
4774#endif
4775 return len + sprintf(buf + len, "\n");
4776}
4777SLAB_ATTR_RO(slabs_cpu_partial);
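
/*
 * Sample output for the attribute above (numbers are illustrative):
 *
 *   57(2) C0=29(1) C1=28(1)
 *
 * i.e. 57 objects in 2 pages sit on per cpu partial lists, with the per
 * cpu breakdown only emitted when CONFIG_SMP is set.
 */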
4778
Christoph Lameter81819f02007-05-06 14:49:36 -07004779static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4780{
4781 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4782}
4783
4784static ssize_t reclaim_account_store(struct kmem_cache *s,
4785 const char *buf, size_t length)
4786{
4787 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4788 if (buf[0] == '1')
4789 s->flags |= SLAB_RECLAIM_ACCOUNT;
4790 return length;
4791}
4792SLAB_ATTR(reclaim_account);
4793
4794static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4795{
Christoph Lameter5af60832007-05-06 14:49:56 -07004796 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
Christoph Lameter81819f02007-05-06 14:49:36 -07004797}
4798SLAB_ATTR_RO(hwcache_align);
4799
4800#ifdef CONFIG_ZONE_DMA
4801static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4802{
4803 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4804}
4805SLAB_ATTR_RO(cache_dma);
4806#endif
4807
4808static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4809{
4810 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4811}
4812SLAB_ATTR_RO(destroy_by_rcu);
4813
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08004814static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4815{
4816 return sprintf(buf, "%d\n", s->reserved);
4817}
4818SLAB_ATTR_RO(reserved);
4819
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004820#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05004821static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4822{
4823 return show_slab_objects(s, buf, SO_ALL);
4824}
4825SLAB_ATTR_RO(slabs);
4826
4827static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4828{
4829 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4830}
4831SLAB_ATTR_RO(total_objects);
4832
4833static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4834{
4835 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4836}
4837
4838static ssize_t sanity_checks_store(struct kmem_cache *s,
4839 const char *buf, size_t length)
4840{
4841 s->flags &= ~SLAB_DEBUG_FREE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004842 if (buf[0] == '1') {
4843 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lametera5a84752010-10-05 13:57:27 -05004844 s->flags |= SLAB_DEBUG_FREE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004845 }
Christoph Lametera5a84752010-10-05 13:57:27 -05004846 return length;
4847}
4848SLAB_ATTR(sanity_checks);
4849
4850static ssize_t trace_show(struct kmem_cache *s, char *buf)
4851{
4852 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4853}
4854
4855static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4856 size_t length)
4857{
4858 s->flags &= ~SLAB_TRACE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004859 if (buf[0] == '1') {
4860 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lametera5a84752010-10-05 13:57:27 -05004861 s->flags |= SLAB_TRACE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004862 }
Christoph Lametera5a84752010-10-05 13:57:27 -05004863 return length;
4864}
4865SLAB_ATTR(trace);
4866
Christoph Lameter81819f02007-05-06 14:49:36 -07004867static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4868{
4869 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4870}
4871
4872static ssize_t red_zone_store(struct kmem_cache *s,
4873 const char *buf, size_t length)
4874{
4875 if (any_slab_objects(s))
4876 return -EBUSY;
4877
4878 s->flags &= ~SLAB_RED_ZONE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004879 if (buf[0] == '1') {
4880 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004881 s->flags |= SLAB_RED_ZONE;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004882 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004883 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004884 return length;
4885}
4886SLAB_ATTR(red_zone);
4887
4888static ssize_t poison_show(struct kmem_cache *s, char *buf)
4889{
4890 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4891}
4892
4893static ssize_t poison_store(struct kmem_cache *s,
4894 const char *buf, size_t length)
4895{
4896 if (any_slab_objects(s))
4897 return -EBUSY;
4898
4899 s->flags &= ~SLAB_POISON;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004900 if (buf[0] == '1') {
4901 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004902 s->flags |= SLAB_POISON;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004903 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004904 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004905 return length;
4906}
4907SLAB_ATTR(poison);
4908
4909static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4910{
4911 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4912}
4913
4914static ssize_t store_user_store(struct kmem_cache *s,
4915 const char *buf, size_t length)
4916{
4917 if (any_slab_objects(s))
4918 return -EBUSY;
4919
4920 s->flags &= ~SLAB_STORE_USER;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004921 if (buf[0] == '1') {
4922 s->flags &= ~__CMPXCHG_DOUBLE;
Christoph Lameter81819f02007-05-06 14:49:36 -07004923 s->flags |= SLAB_STORE_USER;
Christoph Lameterb789ef52011-06-01 12:25:49 -05004924 }
Christoph Lameter06b285d2008-04-14 19:11:41 +03004925 calculate_sizes(s, -1);
Christoph Lameter81819f02007-05-06 14:49:36 -07004926 return length;
4927}
4928SLAB_ATTR(store_user);
4929
Christoph Lameter53e15af2007-05-06 14:49:43 -07004930static ssize_t validate_show(struct kmem_cache *s, char *buf)
4931{
4932 return 0;
4933}
4934
4935static ssize_t validate_store(struct kmem_cache *s,
4936 const char *buf, size_t length)
4937{
Christoph Lameter434e2452007-07-17 04:03:30 -07004938 int ret = -EINVAL;
4939
4940 if (buf[0] == '1') {
4941 ret = validate_slab_cache(s);
4942 if (ret >= 0)
4943 ret = length;
4944 }
4945 return ret;
Christoph Lameter53e15af2007-05-06 14:49:43 -07004946}
4947SLAB_ATTR(validate);
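
/*
 * Example usage (assuming SLUB sysfs support and a cache named kmalloc-64):
 *
 *   # echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * runs validate_slab_cache() over the partial slabs (and, with
 * SLAB_STORE_USER set, the full slabs) of the cache and reports
 * inconsistencies via printk; reading the file returns nothing.
 */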
Christoph Lametera5a84752010-10-05 13:57:27 -05004948
4949static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4950{
4951 if (!(s->flags & SLAB_STORE_USER))
4952 return -ENOSYS;
4953 return list_locations(s, buf, TRACK_ALLOC);
4954}
4955SLAB_ATTR_RO(alloc_calls);
4956
4957static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4958{
4959 if (!(s->flags & SLAB_STORE_USER))
4960 return -ENOSYS;
4961 return list_locations(s, buf, TRACK_FREE);
4962}
4963SLAB_ATTR_RO(free_calls);
4964#endif /* CONFIG_SLUB_DEBUG */
4965
4966#ifdef CONFIG_FAILSLAB
4967static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4968{
4969 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4970}
4971
4972static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4973 size_t length)
4974{
4975 s->flags &= ~SLAB_FAILSLAB;
4976 if (buf[0] == '1')
4977 s->flags |= SLAB_FAILSLAB;
4978 return length;
4979}
4980SLAB_ATTR(failslab);
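/*
 * failslab_store() only sets or clears SLAB_FAILSLAB on the cache; the
 * actual decision to fail an allocation is made by the fault-injection
 * core (CONFIG_FAILSLAB) at allocation time, which can use this flag to
 * restrict injection to explicitly marked caches.
 */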
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05004981#endif
Christoph Lameter53e15af2007-05-06 14:49:43 -07004982
Christoph Lameter2086d262007-05-06 14:49:46 -07004983static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4984{
4985 return 0;
4986}
4987
4988static ssize_t shrink_store(struct kmem_cache *s,
4989 const char *buf, size_t length)
4990{
4991 if (buf[0] == '1') {
4992 int rc = kmem_cache_shrink(s);
4993
4994 if (rc)
4995 return rc;
4996 } else
4997 return -EINVAL;
4998 return length;
4999}
5000SLAB_ATTR(shrink);
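/*
 * Writing '1' to the shrink file invokes kmem_cache_shrink(), which frees
 * empty slabs on the partial lists and reorders the remaining partial
 * slabs; any other input is rejected with -EINVAL, and reading the file
 * always returns nothing.
 */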
5001
Christoph Lameter81819f02007-05-06 14:49:36 -07005002#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08005003static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
Christoph Lameter81819f02007-05-06 14:49:36 -07005004{
Christoph Lameter98246012008-01-07 23:20:26 -08005005 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
Christoph Lameter81819f02007-05-06 14:49:36 -07005006}
5007
Christoph Lameter98246012008-01-07 23:20:26 -08005008static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
Christoph Lameter81819f02007-05-06 14:49:36 -07005009 const char *buf, size_t length)
5010{
Christoph Lameter0121c6192008-04-29 16:11:12 -07005011 unsigned long ratio;
5012 int err;
Christoph Lameter81819f02007-05-06 14:49:36 -07005013
Christoph Lameter0121c6192008-04-29 16:11:12 -07005014 err = strict_strtoul(buf, 10, &ratio);
5015 if (err)
5016 return err;
5017
Christoph Lametere2cb96b2008-08-19 08:51:22 -05005018 if (ratio <= 100)
Christoph Lameter0121c6192008-04-29 16:11:12 -07005019 s->remote_node_defrag_ratio = ratio * 10;
5020
Christoph Lameter81819f02007-05-06 14:49:36 -07005021 return length;
5022}
Christoph Lameter98246012008-01-07 23:20:26 -08005023SLAB_ATTR(remote_node_defrag_ratio);
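/*
 * The file is expressed in percent (0-100) while the stored value is
 * scaled by 10 (0-1000), as the show/store pair above makes explicit.
 * Note that a value above 100 is silently ignored: the write still
 * reports success but the ratio is left unchanged.
 */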
Christoph Lameter81819f02007-05-06 14:49:36 -07005024#endif
5025
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005026#ifdef CONFIG_SLUB_STATS
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005027static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5028{
5029 unsigned long sum = 0;
5030 int cpu;
5031 int len;
5032 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5033
5034 if (!data)
5035 return -ENOMEM;
5036
5037 for_each_online_cpu(cpu) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06005038 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005039
5040 data[cpu] = x;
5041 sum += x;
5042 }
5043
5044 len = sprintf(buf, "%lu", sum);
5045
Christoph Lameter50ef37b2008-04-14 18:52:05 +03005046#ifdef CONFIG_SMP
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005047 for_each_online_cpu(cpu) {
5048 if (data[cpu] && len < PAGE_SIZE - 20)
Christoph Lameter50ef37b2008-04-14 18:52:05 +03005049 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005050 }
Christoph Lameter50ef37b2008-04-14 18:52:05 +03005051#endif
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005052 kfree(data);
5053 return len + sprintf(buf + len, "\n");
5054}
5055
David Rientjes78eb00c2009-10-15 02:20:22 -07005056static void clear_stat(struct kmem_cache *s, enum stat_item si)
5057{
5058 int cpu;
5059
5060 for_each_online_cpu(cpu)
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06005061 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
David Rientjes78eb00c2009-10-15 02:20:22 -07005062}
5063
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005064#define STAT_ATTR(si, text) \
5065static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5066{ \
5067 return show_stat(s, buf, si); \
5068} \
David Rientjes78eb00c2009-10-15 02:20:22 -07005069static ssize_t text##_store(struct kmem_cache *s, \
5070 const char *buf, size_t length) \
5071{ \
5072 if (buf[0] != '0') \
5073 return -EINVAL; \
5074 clear_stat(s, si); \
5075 return length; \
5076} \
5077SLAB_ATTR(text);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005078
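/*
 * Each STAT_ATTR(si, text) invocation below therefore expands into a read
 * handler that sums the per-cpu counter and a write handler that accepts
 * only '0' and clears it. For ALLOC_FASTPATH, for instance, the macro
 * produces roughly:
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
 *			const char *buf, size_t length)
 *	{
 *		if (buf[0] != '0')
 *			return -EINVAL;
 *		clear_stat(s, ALLOC_FASTPATH);
 *		return length;
 *	}
 *	SLAB_ATTR(alloc_fastpath);
 */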
5079STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5080STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5081STAT_ATTR(FREE_FASTPATH, free_fastpath);
5082STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5083STAT_ATTR(FREE_FROZEN, free_frozen);
5084STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5085STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5086STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5087STAT_ATTR(ALLOC_SLAB, alloc_slab);
5088STAT_ATTR(ALLOC_REFILL, alloc_refill);
Christoph Lametere36a2652011-06-01 12:25:57 -05005089STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005090STAT_ATTR(FREE_SLAB, free_slab);
5091STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5092STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5093STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5094STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5095STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5096STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
Christoph Lameter03e404a2011-06-01 12:25:58 -05005097STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
Christoph Lameter65c33762008-04-14 19:11:40 +03005098STAT_ATTR(ORDER_FALLBACK, order_fallback);
Christoph Lameterb789ef52011-06-01 12:25:49 -05005099STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5100STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
Christoph Lameter49e22582011-08-09 16:12:27 -05005101STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5102STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
Alex Shi8028dce2012-02-03 23:34:56 +08005103STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5104STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005105#endif
5106
Pekka Enberg06428782008-01-07 23:20:27 -08005107static struct attribute *slab_attrs[] = {
Christoph Lameter81819f02007-05-06 14:49:36 -07005108 &slab_size_attr.attr,
5109 &object_size_attr.attr,
5110 &objs_per_slab_attr.attr,
5111 &order_attr.attr,
David Rientjes73d342b2009-02-22 17:40:09 -08005112 &min_partial_attr.attr,
Christoph Lameter49e22582011-08-09 16:12:27 -05005113 &cpu_partial_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005114 &objects_attr.attr,
Christoph Lameter205ab992008-04-14 19:11:40 +03005115 &objects_partial_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005116 &partial_attr.attr,
5117 &cpu_slabs_attr.attr,
5118 &ctor_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005119 &aliases_attr.attr,
5120 &align_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005121 &hwcache_align_attr.attr,
5122 &reclaim_account_attr.attr,
5123 &destroy_by_rcu_attr.attr,
Christoph Lametera5a84752010-10-05 13:57:27 -05005124 &shrink_attr.attr,
Lai Jiangshanab9a0f12011-03-10 15:21:48 +08005125 &reserved_attr.attr,
Christoph Lameter49e22582011-08-09 16:12:27 -05005126 &slabs_cpu_partial_attr.attr,
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005127#ifdef CONFIG_SLUB_DEBUG
Christoph Lametera5a84752010-10-05 13:57:27 -05005128 &total_objects_attr.attr,
5129 &slabs_attr.attr,
5130 &sanity_checks_attr.attr,
5131 &trace_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005132 &red_zone_attr.attr,
5133 &poison_attr.attr,
5134 &store_user_attr.attr,
Christoph Lameter53e15af2007-05-06 14:49:43 -07005135 &validate_attr.attr,
Christoph Lameter88a420e2007-05-06 14:49:45 -07005136 &alloc_calls_attr.attr,
5137 &free_calls_attr.attr,
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005138#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07005139#ifdef CONFIG_ZONE_DMA
5140 &cache_dma_attr.attr,
5141#endif
5142#ifdef CONFIG_NUMA
Christoph Lameter98246012008-01-07 23:20:26 -08005143 &remote_node_defrag_ratio_attr.attr,
Christoph Lameter81819f02007-05-06 14:49:36 -07005144#endif
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005145#ifdef CONFIG_SLUB_STATS
5146 &alloc_fastpath_attr.attr,
5147 &alloc_slowpath_attr.attr,
5148 &free_fastpath_attr.attr,
5149 &free_slowpath_attr.attr,
5150 &free_frozen_attr.attr,
5151 &free_add_partial_attr.attr,
5152 &free_remove_partial_attr.attr,
5153 &alloc_from_partial_attr.attr,
5154 &alloc_slab_attr.attr,
5155 &alloc_refill_attr.attr,
Christoph Lametere36a2652011-06-01 12:25:57 -05005156 &alloc_node_mismatch_attr.attr,
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005157 &free_slab_attr.attr,
5158 &cpuslab_flush_attr.attr,
5159 &deactivate_full_attr.attr,
5160 &deactivate_empty_attr.attr,
5161 &deactivate_to_head_attr.attr,
5162 &deactivate_to_tail_attr.attr,
5163 &deactivate_remote_frees_attr.attr,
Christoph Lameter03e404a2011-06-01 12:25:58 -05005164 &deactivate_bypass_attr.attr,
Christoph Lameter65c33762008-04-14 19:11:40 +03005165 &order_fallback_attr.attr,
Christoph Lameterb789ef52011-06-01 12:25:49 -05005166 &cmpxchg_double_fail_attr.attr,
5167 &cmpxchg_double_cpu_fail_attr.attr,
Christoph Lameter49e22582011-08-09 16:12:27 -05005168 &cpu_partial_alloc_attr.attr,
5169 &cpu_partial_free_attr.attr,
Alex Shi8028dce2012-02-03 23:34:56 +08005170 &cpu_partial_node_attr.attr,
5171 &cpu_partial_drain_attr.attr,
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08005172#endif
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +03005173#ifdef CONFIG_FAILSLAB
5174 &failslab_attr.attr,
5175#endif
5176
Christoph Lameter81819f02007-05-06 14:49:36 -07005177 NULL
5178};
5179
5180static struct attribute_group slab_attr_group = {
5181 .attrs = slab_attrs,
5182};
5183
5184static ssize_t slab_attr_show(struct kobject *kobj,
5185 struct attribute *attr,
5186 char *buf)
5187{
5188 struct slab_attribute *attribute;
5189 struct kmem_cache *s;
5190 int err;
5191
5192 attribute = to_slab_attr(attr);
5193 s = to_slab(kobj);
5194
5195 if (!attribute->show)
5196 return -EIO;
5197
5198 err = attribute->show(s, buf);
5199
5200 return err;
5201}
5202
5203static ssize_t slab_attr_store(struct kobject *kobj,
5204 struct attribute *attr,
5205 const char *buf, size_t len)
5206{
5207 struct slab_attribute *attribute;
5208 struct kmem_cache *s;
5209 int err;
5210
5211 attribute = to_slab_attr(attr);
5212 s = to_slab(kobj);
5213
5214 if (!attribute->store)
5215 return -EIO;
5216
5217 err = attribute->store(s, buf, len);
5218
5219 return err;
5220}
5221
Christoph Lameter151c6022008-01-07 22:29:05 -08005222static void kmem_cache_release(struct kobject *kobj)
5223{
5224 struct kmem_cache *s = to_slab(kobj);
5225
Pekka Enberg84c1cf62010-09-14 23:21:12 +03005226 kfree(s->name);
Christoph Lameter151c6022008-01-07 22:29:05 -08005227 kfree(s);
5228}
5229
Emese Revfy52cf25d2010-01-19 02:58:23 +01005230static const struct sysfs_ops slab_sysfs_ops = {
Christoph Lameter81819f02007-05-06 14:49:36 -07005231 .show = slab_attr_show,
5232 .store = slab_attr_store,
5233};
5234
5235static struct kobj_type slab_ktype = {
5236 .sysfs_ops = &slab_sysfs_ops,
Christoph Lameter151c6022008-01-07 22:29:05 -08005237 .release = kmem_cache_release
Christoph Lameter81819f02007-05-06 14:49:36 -07005238};
5239
5240static int uevent_filter(struct kset *kset, struct kobject *kobj)
5241{
5242 struct kobj_type *ktype = get_ktype(kobj);
5243
5244 if (ktype == &slab_ktype)
5245 return 1;
5246 return 0;
5247}
5248
Emese Revfy9cd43612009-12-31 14:52:51 +01005249static const struct kset_uevent_ops slab_uevent_ops = {
Christoph Lameter81819f02007-05-06 14:49:36 -07005250 .filter = uevent_filter,
5251};
5252
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005253static struct kset *slab_kset;
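/*
 * Taken together, the pieces above are the sysfs plumbing: slab_kset is
 * the "slab" directory created under /sys/kernel (kernel_kobj), every
 * kmem_cache gets a kobject of slab_ktype inside it, and reads and writes
 * of the attribute files are routed through slab_sysfs_ops to the
 * per-attribute show/store handlers listed in slab_attrs[].
 */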
Christoph Lameter81819f02007-05-06 14:49:36 -07005254
5255#define ID_STR_LENGTH 64
5256
5257/* Create a unique string id for a slab cache:
Christoph Lameter6446faa2008-02-15 23:45:26 -08005258 *
5259 * Format: :[flags-]size
Christoph Lameter81819f02007-05-06 14:49:36 -07005260 */
5261static char *create_unique_id(struct kmem_cache *s)
5262{
5263 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5264 char *p = name;
5265
5266 BUG_ON(!name);
5267
5268 *p++ = ':';
5269 /*
5270 * First come the flags that affect slabcache operations. We will only
5271 * get here for aliasable slabs so we do not need to support
5272 * too many flags. The flags here must cover all flags that
5273 * are matched during merging to guarantee that the id is
5274 * unique.
5275 */
5276 if (s->flags & SLAB_CACHE_DMA)
5277 *p++ = 'd';
5278 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5279 *p++ = 'a';
5280 if (s->flags & SLAB_DEBUG_FREE)
5281 *p++ = 'F';
Vegard Nossum5a896d92008-04-04 00:54:48 +02005282 if (!(s->flags & SLAB_NOTRACK))
5283 *p++ = 't';
Christoph Lameter81819f02007-05-06 14:49:36 -07005284 if (p != name + 1)
5285 *p++ = '-';
5286 p += sprintf(p, "%07d", s->size);
5287 BUG_ON(p > name + ID_STR_LENGTH - 1);
5288 return name;
5289}
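/*
 * For example, a mergeable cache of size 192 with SLAB_RECLAIM_ACCOUNT
 * set and SLAB_NOTRACK clear gets the id ":at-0000192"; if no flag
 * characters are emitted at all (e.g. only SLAB_NOTRACK is set), the id
 * is simply ":0000192".
 */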
5290
5291static int sysfs_slab_add(struct kmem_cache *s)
5292{
5293 int err;
5294 const char *name;
5295 int unmergeable;
5296
Christoph Lameter97d06602012-07-06 15:25:11 -05005297 if (slab_state < FULL)
Christoph Lameter81819f02007-05-06 14:49:36 -07005298 /* Defer until later */
5299 return 0;
5300
5301 unmergeable = slab_unmergeable(s);
5302 if (unmergeable) {
5303 /*
5304 * This slabcache can never be merged, so we can use its name directly.
5305 * That is typically the case for debug caches, and it also lets us
5306 * catch duplicate names easily.
5307 */
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005308 sysfs_remove_link(&slab_kset->kobj, s->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005309 name = s->name;
5310 } else {
5311 /*
5312 * Create a unique name for the slab as a target
5313 * for the symlinks.
5314 */
5315 name = create_unique_id(s);
5316 }
5317
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005318 s->kobj.kset = slab_kset;
Greg Kroah-Hartman1eada112007-12-17 23:05:35 -07005319 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
5320 if (err) {
5321 kobject_put(&s->kobj);
		if (!unmergeable)
			kfree(name);	/* free the id allocated by create_unique_id() */
Christoph Lameter81819f02007-05-06 14:49:36 -07005322 return err;
Greg Kroah-Hartman1eada112007-12-17 23:05:35 -07005323 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005324
5325 err = sysfs_create_group(&s->kobj, &slab_attr_group);
Xiaotian Feng5788d8a2009-07-22 11:28:53 +08005326 if (err) {
5327 kobject_del(&s->kobj);
5328 kobject_put(&s->kobj);
		if (!unmergeable)
			kfree(name);	/* free the id allocated by create_unique_id() */
Christoph Lameter81819f02007-05-06 14:49:36 -07005329 return err;
Xiaotian Feng5788d8a2009-07-22 11:28:53 +08005330 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005331 kobject_uevent(&s->kobj, KOBJ_ADD);
5332 if (!unmergeable) {
5333 /* Setup first alias */
5334 sysfs_slab_alias(s, s->name);
5335 kfree(name);
5336 }
5337 return 0;
5338}
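/*
 * The net effect: an unmergeable cache (typically one with debug options)
 * appears directly as /sys/kernel/slab/<name>, while mergeable caches
 * share a directory named after the unique id (e.g. /sys/kernel/slab/:t-0000192)
 * and their user-visible names are added as symlinks by sysfs_slab_alias().
 */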
5339
5340static void sysfs_slab_remove(struct kmem_cache *s)
5341{
Christoph Lameter97d06602012-07-06 15:25:11 -05005342 if (slab_state < FULL)
Christoph Lameter2bce6482010-07-19 11:39:11 -05005343 /*
5344 * Sysfs has not been set up yet, so there is no need to remove the
5345 * cache from sysfs.
5346 */
5347 return;
5348
Christoph Lameter81819f02007-05-06 14:49:36 -07005349 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5350 kobject_del(&s->kobj);
Christoph Lameter151c6022008-01-07 22:29:05 -08005351 kobject_put(&s->kobj);
Christoph Lameter81819f02007-05-06 14:49:36 -07005352}
5353
5354/*
5355 * Need to buffer aliases during bootup until sysfs becomes
Nick Andrew9f6c708e2008-12-05 14:08:08 +11005356 * available lest we lose that information.
Christoph Lameter81819f02007-05-06 14:49:36 -07005357 */
5358struct saved_alias {
5359 struct kmem_cache *s;
5360 const char *name;
5361 struct saved_alias *next;
5362};
5363
Adrian Bunk5af328a2007-07-17 04:03:27 -07005364static struct saved_alias *alias_list;
Christoph Lameter81819f02007-05-06 14:49:36 -07005365
5366static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5367{
5368 struct saved_alias *al;
5369
Christoph Lameter97d06602012-07-06 15:25:11 -05005370 if (slab_state == FULL) {
Christoph Lameter81819f02007-05-06 14:49:36 -07005371 /*
5372 * If we have a leftover link then remove it.
5373 */
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005374 sysfs_remove_link(&slab_kset->kobj, name);
5375 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005376 }
5377
5378 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5379 if (!al)
5380 return -ENOMEM;
5381
5382 al->s = s;
5383 al->name = name;
5384 al->next = alias_list;
5385 alias_list = al;
5386 return 0;
5387}
5388
5389static int __init slab_sysfs_init(void)
5390{
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07005391 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07005392 int err;
5393
Christoph Lameter18004c52012-07-06 15:25:12 -05005394 mutex_lock(&slab_mutex);
Christoph Lameter2bce6482010-07-19 11:39:11 -05005395
Greg Kroah-Hartman0ff21e42007-11-06 10:36:58 -08005396 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
Greg Kroah-Hartman27c3a312007-11-01 09:29:06 -06005397 if (!slab_kset) {
Christoph Lameter18004c52012-07-06 15:25:12 -05005398 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07005399 printk(KERN_ERR "Cannot register slab subsystem.\n");
5400 return -ENOSYS;
5401 }
5402
Christoph Lameter97d06602012-07-06 15:25:11 -05005403 slab_state = FULL;
Christoph Lameter26a7bd02007-05-09 02:32:39 -07005404
Christoph Lameter5b95a4ac2007-07-17 04:03:19 -07005405 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter26a7bd02007-05-09 02:32:39 -07005406 err = sysfs_slab_add(s);
Christoph Lameter5d540fb2007-08-30 23:56:26 -07005407 if (err)
5408 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5409 " to sysfs\n", s->name);
Christoph Lameter26a7bd02007-05-09 02:32:39 -07005410 }
Christoph Lameter81819f02007-05-06 14:49:36 -07005411
5412 while (alias_list) {
5413 struct saved_alias *al = alias_list;
5414
5415 alias_list = alias_list->next;
5416 err = sysfs_slab_alias(al->s, al->name);
Christoph Lameter5d540fb2007-08-30 23:56:26 -07005417 if (err)
5418 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
Julia Lawall068ce412012-07-08 13:37:40 +02005419 " %s to sysfs\n", al->name);
Christoph Lameter81819f02007-05-06 14:49:36 -07005420 kfree(al);
5421 }
5422
Christoph Lameter18004c52012-07-06 15:25:12 -05005423 mutex_unlock(&slab_mutex);
Christoph Lameter81819f02007-05-06 14:49:36 -07005424 resiliency_test();
5425 return 0;
5426}
5427
5428__initcall(slab_sysfs_init);
Christoph Lameterab4d5ed2010-10-05 13:57:26 -05005429#endif /* CONFIG_SYSFS */
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005430
5431/*
5432 * The /proc/slabinfo ABI
5433 */
Linus Torvalds158a9622008-01-02 13:04:48 -08005434#ifdef CONFIG_SLABINFO
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005435static void print_slabinfo_header(struct seq_file *m)
5436{
5437 seq_puts(m, "slabinfo - version: 2.1\n");
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05005438 seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005439 "<objperslab> <pagesperslab>");
5440 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
5441 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
5442 seq_putc(m, '\n');
5443}
5444
5445static void *s_start(struct seq_file *m, loff_t *pos)
5446{
5447 loff_t n = *pos;
5448
Christoph Lameter18004c52012-07-06 15:25:12 -05005449 mutex_lock(&slab_mutex);
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005450 if (!n)
5451 print_slabinfo_header(m);
5452
5453 return seq_list_start(&slab_caches, *pos);
5454}
5455
5456static void *s_next(struct seq_file *m, void *p, loff_t *pos)
5457{
5458 return seq_list_next(p, &slab_caches, pos);
5459}
5460
5461static void s_stop(struct seq_file *m, void *p)
5462{
Christoph Lameter18004c52012-07-06 15:25:12 -05005463 mutex_unlock(&slab_mutex);
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005464}
5465
5466static int s_show(struct seq_file *m, void *p)
5467{
5468 unsigned long nr_partials = 0;
5469 unsigned long nr_slabs = 0;
5470 unsigned long nr_inuse = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03005471 unsigned long nr_objs = 0;
5472 unsigned long nr_free = 0;
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005473 struct kmem_cache *s;
5474 int node;
5475
5476 s = list_entry(p, struct kmem_cache, list);
5477
5478 for_each_online_node(node) {
5479 struct kmem_cache_node *n = get_node(s, node);
5480
5481 if (!n)
5482 continue;
5483
5484 nr_partials += n->nr_partial;
5485 nr_slabs += atomic_long_read(&n->nr_slabs);
Christoph Lameter205ab992008-04-14 19:11:40 +03005486 nr_objs += atomic_long_read(&n->total_objects);
5487 nr_free += count_partial(n, count_free);
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005488 }
5489
Christoph Lameter205ab992008-04-14 19:11:40 +03005490 nr_inuse = nr_objs - nr_free;
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005491
5492 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
Christoph Lameter834f3d12008-04-14 19:11:31 +03005493 nr_objs, s->size, oo_objects(s->oo),
5494 (1 << oo_order(s->oo)));
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005495 seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
5496 seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
5497 0UL);
5498 seq_putc(m, '\n');
5499 return 0;
5500}
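/*
 * A line emitted by s_show() looks roughly like this (the figures are
 * illustrative only):
 *
 *	kmalloc-64        37632  37632     64   64    1 : tunables 0 0 0 : slabdata 588 588 0
 *
 * SLUB has no per-cache tunables and no shared arrays, hence the zeros in
 * the tunables and sharedavail columns.
 */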
5501
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04005502static const struct seq_operations slabinfo_op = {
Pekka J Enberg57ed3ed2008-01-01 17:23:28 +01005503 .start = s_start,
5504 .next = s_next,
5505 .stop = s_stop,
5506 .show = s_show,
5507};
5508
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04005509static int slabinfo_open(struct inode *inode, struct file *file)
5510{
5511 return seq_open(file, &slabinfo_op);
5512}
5513
5514static const struct file_operations proc_slabinfo_operations = {
5515 .open = slabinfo_open,
5516 .read = seq_read,
5517 .llseek = seq_lseek,
5518 .release = seq_release,
5519};
5520
5521static int __init slab_proc_init(void)
5522{
Vasiliy Kulikovab067e92011-09-27 21:54:53 +04005523 proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04005524 return 0;
5525}
5526module_init(slab_proc_init);
Linus Torvalds158a9622008-01-02 13:04:48 -08005527#endif /* CONFIG_SLABINFO */