/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemtrace.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the object of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page_struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

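/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): the inverted lock order described above. While holding
 * list_lock only a trylock of the slab is attempted, as
 * get_partial_node() does further below:
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru)
 *		if (slab_trylock(page))
 *			goto got_it;	(slab is now frozen for this cpu)
 *	spin_unlock(&n->list_lock);
 */
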
#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG 1
#else
#define SLABDEBUG 0
#endif

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
 */
struct track {
	unsigned long addr;	/* Called from address */
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SLUB_DEBUG
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
	kfree(s);
}

#endif

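/* Added note: per-cpu statistics bump; compiles away unless CONFIG_SLUB_STATS is set. */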
static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
	return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	return s->node[node];
#else
	return &s->local_node;
#endif
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}
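
/*
 * Added note: a pointer is valid only if it lies within the slab's
 * object area and is a whole multiple of s->size from the base; e.g.
 * with s->size == 64, base + 96 fails the modulo test above.
 */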

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

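/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): pushing a free object back onto a slab's freelist with the
 * helpers above. The real free path additionally takes the slab lock
 * and runs debug checks; the name freelist_push_example is
 * hypothetical.
 */
static inline void freelist_push_example(struct kmem_cache *s,
					struct page *page, void *object)
{
	/* Link the object to the current head of the freelist... */
	set_freepointer(s, object, page->freelist);
	/* ...and make it the new head. */
	page->freelist = object;
	page->inuse--;
}
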
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
			__p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
	for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
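/* Added example: the third object in a slab lives at addr + 2 * s->size and has index 2. */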
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
						unsigned long size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

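/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): order and object count are packed into a single word. With
 * 4K pages, oo_make(1, 64) stores order 1 and (PAGE_SIZE << 1) / 64
 * == 128 objects. The name oo_example is hypothetical.
 */
static inline void oo_example(void)
{
	struct kmem_cache_order_objects x = oo_make(1, 64);

	BUG_ON(oo_order(x) != 1);
	BUG_ON(oo_objects(x) != (PAGE_SIZE << 1) / 64);
}
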
#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
	int i, offset;
	int newline = 1;
	char ascii[17];

	ascii[16] = 0;

	for (i = 0; i < length; i++) {
		if (newline) {
			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
			newline = 0;
		}
		printk(KERN_CONT " %02x", addr[i]);
		offset = i % 16;
		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
		if (offset == 15) {
			printk(KERN_CONT " %s\n", ascii);
			newline = 1;
		}
	}
	if (!newline) {
		i %= 16;
		while (i < 16) {
			printk(KERN_CONT "   ");
			ascii[i] = ' ';
			i++;
		}
		printk(KERN_CONT " %s\n", ascii);
	}
}

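/*
 * Added note: the alloc/free tracking information is laid out as a
 * two-element struct track array behind the object; "p + alloc" below
 * indexes it with TRACK_ALLOC (0) or TRACK_FREE (1).
 */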
static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
		page, page->objects, page->inuse, page->freelist, page->flags);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));

	if (p > addr + 16)
		print_section("Bytes b4", p - 16, 16);

	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));

	if (s->flags & SLAB_RED_ZONE)
		print_section("Redzone", p + s->objsize,
			s->inuse - s->objsize);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (off != s->size)
		/* Beginning of the filler is the free pointer */
		print_section("Padding", p + off, s->size - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, int active)
{
	u8 *p = object;

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->objsize - 1);
		p[s->objsize - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->objsize,
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
			s->inuse - s->objsize);
}

static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
	while (bytes) {
		if (*start != (u8)value)
			return start;
		start++;
		bytes--;
	}
	return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	fault = check_bytes(start, value, bytes);
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->objsize
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	objsize == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

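/*
 * Added worked example (illustrative numbers, not from the original
 * source): a cache with objsize 24, SLAB_RED_ZONE, SLAB_POISON and
 * SLAB_STORE_USER on a 64-bit kernel. Poisoning forbids overlaying
 * the free pointer on the object, so roughly:
 *
 *	0..23	object (0x6b poison, last byte 0xa5 when free)
 *	24..31	red zone word (0xbb inactive / 0xcc active)
 *	32..39	free pointer (s->offset == 32)
 *	40..87	two struct track records (alloc and free)
 *
 * giving objsize == 24 and inuse == 32, with any remaining bytes up
 * to s->size filled with 0x5a padding.
 */
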
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page));
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, int active)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		unsigned int red =
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, red, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (!active && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && active)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;
	unsigned long max_objects;

	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but "
			"should be %d", page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	spin_unlock(&n->list_lock);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (!NUMA_BUILD || n) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, 0);
	init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (!on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already allocated");
		goto bad;
	}

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (!check_object(s, page, object, 0))
		goto bad;

	/* On success, perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static int free_debug_processing(struct kmem_cache *s, struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, 1))
		return 0;

	if (unlikely(s != page->slab)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		} else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	/* Special debug activities for freeing objects */
	if (!PageSlubFrozen(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	return 1;

fail:
	slab_fix(s, "Object at 0x%p not freed", object);
	return 0;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	if (tolower(*str) == 'o') {
		/*
		 * Avoid enabling debugging on caches if their minimum
		 * order would increase as a result.
		 */
		disable_higher_order_debug = 1;
		goto out;
	}

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);

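/*
 * Example boot parameter usage (added note, derived from the parser
 * above; the kmalloc-64 cache name is illustrative):
 *
 *	slub_debug		enable all debug options for all slabs
 *	slub_debug=,dentry	enable all options for dentry slabs only
 *	slub_debug=zp		red zoning and poisoning for all slabs
 *	slub_debug=u,kmalloc-64	user tracking for the kmalloc-64 cache
 *	slub_debug=o		skip debugging where it would raise a
 *				cache's minimum order
 */
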
static unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs ||
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
		flags |= slub_debug;

	return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
#endif

/*
 * Slab allocation and freeing
 */
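/* Added note: node == -1 means no node preference, hence plain alloc_pages(). */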
static inline struct page *alloc_slab_page(gfp_t flags, int node,
					struct kmem_cache_order_objects oo)
{
	int order = oo_order(oo);

	flags |= __GFP_NOTRACK;

	if (node == -1)
		return alloc_pages(flags, order);
	else
		return alloc_pages_node(node, flags, order);
}

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

	page = alloc_slab_page(alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(flags, node, oo);
		if (!page)
			return NULL;

		stat(s, ORDER_FALLBACK);
	}

	if (kmemcheck_enabled
		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
		int pages = 1 << oo_order(oo);

		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);

		/*
		 * Objects from caches that have a constructor don't get
		 * cleared when they're allocated, so we need to do it here.
		 */
		if (s->ctor)
			kmemcheck_mark_uninitialized_pages(page, pages);
		else
			kmemcheck_mark_unallocated_pages(page, pages);
	}

	page->objects = oo_objects(oo);
	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << oo_order(oo));

	return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(object);
}

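/*
 * Added summary note: allocate a fresh slab and chain every object
 * into the initial freelist, terminated with a NULL free pointer.
 */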
1147static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1148{
1149 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07001150 void *start;
Christoph Lameter81819f02007-05-06 14:49:36 -07001151 void *last;
1152 void *p;
1153
Christoph Lameter6cb06222007-10-16 01:25:41 -07001154 BUG_ON(flags & GFP_SLAB_BUG_MASK);
Christoph Lameter81819f02007-05-06 14:49:36 -07001155
Christoph Lameter6cb06222007-10-16 01:25:41 -07001156 page = allocate_slab(s,
1157 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
Christoph Lameter81819f02007-05-06 14:49:36 -07001158 if (!page)
1159 goto out;
1160
Christoph Lameter205ab992008-04-14 19:11:40 +03001161 inc_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001162 page->slab = s;
1163 page->flags |= 1 << PG_slab;
1164 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1165 SLAB_STORE_USER | SLAB_TRACE))
Andy Whitcroft8a380822008-07-23 21:27:18 -07001166 __SetPageSlubDebug(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001167
1168 start = page_address(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001169
1170 if (unlikely(s->flags & SLAB_POISON))
Christoph Lameter834f3d12008-04-14 19:11:31 +03001171 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
Christoph Lameter81819f02007-05-06 14:49:36 -07001172
1173 last = start;
Christoph Lameter224a88b2008-04-14 19:11:31 +03001174 for_each_object(p, s, start, page->objects) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001175 setup_object(s, page, last);
1176 set_freepointer(s, last, p);
1177 last = p;
1178 }
1179 setup_object(s, page, last);
Christoph Lametera973e9d2008-03-01 13:40:44 -08001180 set_freepointer(s, last, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07001181
1182 page->freelist = start;
1183 page->inuse = 0;
1184out:
Christoph Lameter81819f02007-05-06 14:49:36 -07001185 return page;
1186}
1187
1188static void __free_slab(struct kmem_cache *s, struct page *page)
1189{
Christoph Lameter834f3d12008-04-14 19:11:31 +03001190 int order = compound_order(page);
1191 int pages = 1 << order;
Christoph Lameter81819f02007-05-06 14:49:36 -07001192
Andy Whitcroft8a380822008-07-23 21:27:18 -07001193 if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001194 void *p;
1195
1196 slab_pad_check(s, page);
Christoph Lameter224a88b2008-04-14 19:11:31 +03001197 for_each_object(p, s, page_address(page),
1198 page->objects)
Christoph Lameter81819f02007-05-06 14:49:36 -07001199 check_object(s, page, p, 0);
Andy Whitcroft8a380822008-07-23 21:27:18 -07001200 __ClearPageSlubDebug(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001201 }
1202
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001203 kmemcheck_free_shadow(page, compound_order(page));
Vegard Nossum5a896d92008-04-04 00:54:48 +02001204
Christoph Lameter81819f02007-05-06 14:49:36 -07001205 mod_zone_page_state(page_zone(page),
1206 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1207 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
Pekka Enberg06428782008-01-07 23:20:27 -08001208 -pages);
Christoph Lameter81819f02007-05-06 14:49:36 -07001209
Christoph Lameter49bd5222008-04-14 18:52:18 +03001210 __ClearPageSlab(page);
1211 reset_page_mapcount(page);
Nick Piggin1eb5ac62009-05-05 19:13:44 +10001212 if (current->reclaim_state)
1213 current->reclaim_state->reclaimed_slab += pages;
Christoph Lameter834f3d12008-04-14 19:11:31 +03001214 __free_pages(page, order);
Christoph Lameter81819f02007-05-06 14:49:36 -07001215}
1216
1217static void rcu_free_slab(struct rcu_head *h)
1218{
1219 struct page *page;
1220
1221 page = container_of((struct list_head *)h, struct page, lru);
1222 __free_slab(page->slab, page);
1223}
1224
1225static void free_slab(struct kmem_cache *s, struct page *page)
1226{
1227 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1228 /*
1229 * RCU free overloads the RCU head over the LRU
1230 */
1231 struct rcu_head *head = (void *)&page->lru;
1232
1233 call_rcu(head, rcu_free_slab);
1234 } else
1235 __free_slab(s, page);
1236}
1237
1238static void discard_slab(struct kmem_cache *s, struct page *page)
1239{
Christoph Lameter205ab992008-04-14 19:11:40 +03001240 dec_slabs_node(s, page_to_nid(page), page->objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07001241 free_slab(s, page);
1242}
1243
1244/*
1245 * Per slab locking using the pagelock
1246 */
1247static __always_inline void slab_lock(struct page *page)
1248{
1249 bit_spin_lock(PG_locked, &page->flags);
1250}
1251
1252static __always_inline void slab_unlock(struct page *page)
1253{
Nick Piggina76d3542008-01-07 23:20:27 -08001254 __bit_spin_unlock(PG_locked, &page->flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07001255}
1256
1257static __always_inline int slab_trylock(struct page *page)
1258{
1259 int rc = 1;
1260
1261 rc = bit_spin_trylock(PG_locked, &page->flags);
1262 return rc;
1263}
1264
1265/*
1266 * Management of partially allocated slabs
1267 */
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001268static void add_partial(struct kmem_cache_node *n,
1269 struct page *page, int tail)
Christoph Lameter81819f02007-05-06 14:49:36 -07001270{
Christoph Lametere95eed52007-05-06 14:49:44 -07001271 spin_lock(&n->list_lock);
1272 n->nr_partial++;
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001273 if (tail)
1274 list_add_tail(&page->lru, &n->partial);
1275 else
1276 list_add(&page->lru, &n->partial);
Christoph Lameter81819f02007-05-06 14:49:36 -07001277 spin_unlock(&n->list_lock);
1278}
1279
Christoph Lameter0121c6192008-04-29 16:11:12 -07001280static void remove_partial(struct kmem_cache *s, struct page *page)
Christoph Lameter81819f02007-05-06 14:49:36 -07001281{
1282 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1283
1284 spin_lock(&n->list_lock);
1285 list_del(&page->lru);
1286 n->nr_partial--;
1287 spin_unlock(&n->list_lock);
1288}
1289
1290/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001291 * Lock slab and remove from the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07001292 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001293 * Must hold list_lock.
Christoph Lameter81819f02007-05-06 14:49:36 -07001294 */
Christoph Lameter0121c6192008-04-29 16:11:12 -07001295static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1296 struct page *page)
Christoph Lameter81819f02007-05-06 14:49:36 -07001297{
1298 if (slab_trylock(page)) {
1299 list_del(&page->lru);
1300 n->nr_partial--;
Andy Whitcroft8a380822008-07-23 21:27:18 -07001301 __SetPageSlubFrozen(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001302 return 1;
1303 }
1304 return 0;
1305}
1306
1307/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001308 * Try to allocate a partial slab from a specific node.
Christoph Lameter81819f02007-05-06 14:49:36 -07001309 */
1310static struct page *get_partial_node(struct kmem_cache_node *n)
1311{
1312 struct page *page;
1313
1314 /*
1315 * Racy check. If we mistakenly see no partial slabs then we
1316 * just allocate an empty slab. If we mistakenly try to get a
Christoph Lameter672bba32007-05-09 02:32:39 -07001317	 * partial slab and there is none available then get_partial_node()
1318 * will return NULL.
Christoph Lameter81819f02007-05-06 14:49:36 -07001319 */
1320 if (!n || !n->nr_partial)
1321 return NULL;
1322
1323 spin_lock(&n->list_lock);
1324 list_for_each_entry(page, &n->partial, lru)
Christoph Lameter4b6f0752007-05-16 22:10:53 -07001325 if (lock_and_freeze_slab(n, page))
Christoph Lameter81819f02007-05-06 14:49:36 -07001326 goto out;
1327 page = NULL;
1328out:
1329 spin_unlock(&n->list_lock);
1330 return page;
1331}
1332
1333/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001334 * Get a page from somewhere. Search in increasing NUMA distances.
Christoph Lameter81819f02007-05-06 14:49:36 -07001335 */
1336static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1337{
1338#ifdef CONFIG_NUMA
1339 struct zonelist *zonelist;
Mel Gormandd1a2392008-04-28 02:12:17 -07001340 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07001341 struct zone *zone;
1342 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07001343 struct page *page;
1344
1345 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07001346	 * The defrag ratio allows configuring the tradeoff between
1347 * inter node defragmentation and node local allocations. A lower
1348 * defrag_ratio increases the tendency to do local allocations
1349 * instead of attempting to obtain partial slabs from other nodes.
Christoph Lameter81819f02007-05-06 14:49:36 -07001350 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001351 * If the defrag_ratio is set to 0 then kmalloc() always
1352 * returns node local objects. If the ratio is higher then kmalloc()
1353 * may return off node objects because partial slabs are obtained
1354 * from other nodes and filled up.
Christoph Lameter81819f02007-05-06 14:49:36 -07001355 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08001356 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
Christoph Lameter672bba32007-05-09 02:32:39 -07001357 * defrag_ratio = 1000) then every (well almost) allocation will
1358 * first attempt to defrag slab caches on other nodes. This means
1359 * scanning over all nodes to look for partial slabs which may be
1360 * expensive if we do it every time we are trying to find a slab
1361 * with available objects.
Christoph Lameter81819f02007-05-06 14:49:36 -07001362 */
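	/*
	 * Worked example (editor's note, values assumed): with the default
	 * internal ratio of 1000 (set in kmem_cache_open() below),
	 * get_cycles() % 1024 exceeds it only for 1001..1023, so the
	 * remote scan is skipped roughly 2% of the time; a ratio of 100
	 * skips it about 90% of the time, and 0 disables it entirely.
	 */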
Christoph Lameter98246012008-01-07 23:20:26 -08001363 if (!s->remote_node_defrag_ratio ||
1364 get_cycles() % 1024 > s->remote_node_defrag_ratio)
Christoph Lameter81819f02007-05-06 14:49:36 -07001365 return NULL;
1366
Mel Gorman0e884602008-04-28 02:12:14 -07001367 zonelist = node_zonelist(slab_node(current->mempolicy), flags);
Mel Gorman54a6eb52008-04-28 02:12:16 -07001368 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001369 struct kmem_cache_node *n;
1370
Mel Gorman54a6eb52008-04-28 02:12:16 -07001371 n = get_node(s, zone_to_nid(zone));
Christoph Lameter81819f02007-05-06 14:49:36 -07001372
Mel Gorman54a6eb52008-04-28 02:12:16 -07001373 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
David Rientjes3b89d7d2009-02-22 17:40:07 -08001374 n->nr_partial > s->min_partial) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001375 page = get_partial_node(n);
1376 if (page)
1377 return page;
1378 }
1379 }
1380#endif
1381 return NULL;
1382}
1383
1384/*
1385 * Get a partial page, lock it and return it.
1386 */
1387static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1388{
1389 struct page *page;
1390 int searchnode = (node == -1) ? numa_node_id() : node;
1391
1392 page = get_partial_node(get_node(s, searchnode));
1393 if (page || (flags & __GFP_THISNODE))
1394 return page;
1395
1396 return get_any_partial(s, flags);
1397}
1398
1399/*
1400 * Move a page back to the lists.
1401 *
1402 * Must be called with the slab lock held.
1403 *
1404 * On exit the slab lock will have been dropped.
1405 */
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001406static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
Christoph Lameter81819f02007-05-06 14:49:36 -07001407{
Christoph Lametere95eed52007-05-06 14:49:44 -07001408 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1409
Andy Whitcroft8a380822008-07-23 21:27:18 -07001410 __ClearPageSlubFrozen(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001411 if (page->inuse) {
Christoph Lametere95eed52007-05-06 14:49:44 -07001412
Christoph Lametera973e9d2008-03-01 13:40:44 -08001413 if (page->freelist) {
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001414 add_partial(n, page, tail);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001415 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001416 } else {
Christoph Lameter84e554e2009-12-18 16:26:23 -06001417 stat(s, DEACTIVATE_FULL);
Andy Whitcroft8a380822008-07-23 21:27:18 -07001418 if (SLABDEBUG && PageSlubDebug(page) &&
1419 (s->flags & SLAB_STORE_USER))
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001420 add_full(n, page);
1421 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001422 slab_unlock(page);
1423 } else {
Christoph Lameter84e554e2009-12-18 16:26:23 -06001424 stat(s, DEACTIVATE_EMPTY);
David Rientjes3b89d7d2009-02-22 17:40:07 -08001425 if (n->nr_partial < s->min_partial) {
Christoph Lametere95eed52007-05-06 14:49:44 -07001426 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07001427 * Adding an empty slab to the partial slabs in order
1428 * to avoid page allocator overhead. This slab needs
1429	 * to come after the other slabs with objects in them,
Christoph Lameter6446faa2008-02-15 23:45:26 -08001430 * so that the others get filled first. That way the
1431 * size of the partial list stays small.
1432 *
Christoph Lameter0121c6192008-04-29 16:11:12 -07001433 * kmem_cache_shrink can reclaim any empty slabs from
1434 * the partial list.
Christoph Lametere95eed52007-05-06 14:49:44 -07001435 */
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001436 add_partial(n, page, 1);
Christoph Lametere95eed52007-05-06 14:49:44 -07001437 slab_unlock(page);
1438 } else {
1439 slab_unlock(page);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001440 stat(s, FREE_SLAB);
Christoph Lametere95eed52007-05-06 14:49:44 -07001441 discard_slab(s, page);
1442 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001443 }
1444}
1445
1446/*
1447 * Remove the cpu slab
1448 */
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001449static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001450{
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001451 struct page *page = c->page;
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001452 int tail = 1;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001453
Christoph Lameterb773ad72008-03-04 11:10:17 -08001454 if (page->freelist)
Christoph Lameter84e554e2009-12-18 16:26:23 -06001455 stat(s, DEACTIVATE_REMOTE_FREES);
Christoph Lameter894b8782007-05-10 03:15:16 -07001456 /*
Christoph Lameter6446faa2008-02-15 23:45:26 -08001457 * Merge cpu freelist into slab freelist. Typically we get here
Christoph Lameter894b8782007-05-10 03:15:16 -07001458 * because both freelists are empty. So this is unlikely
1459 * to occur.
1460 */
Christoph Lametera973e9d2008-03-01 13:40:44 -08001461 while (unlikely(c->freelist)) {
Christoph Lameter894b8782007-05-10 03:15:16 -07001462 void **object;
1463
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001464 tail = 0; /* Hot objects. Put the slab first */
1465
Christoph Lameter894b8782007-05-10 03:15:16 -07001466 /* Retrieve object from cpu_freelist */
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001467 object = c->freelist;
Christoph Lameterff120592009-12-18 16:26:22 -06001468 c->freelist = get_freepointer(s, c->freelist);
Christoph Lameter894b8782007-05-10 03:15:16 -07001469
1470 /* And put onto the regular freelist */
Christoph Lameterff120592009-12-18 16:26:22 -06001471 set_freepointer(s, object, page->freelist);
Christoph Lameter894b8782007-05-10 03:15:16 -07001472 page->freelist = object;
1473 page->inuse--;
1474 }
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001475 c->page = NULL;
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001476 unfreeze_slab(s, page, tail);
Christoph Lameter81819f02007-05-06 14:49:36 -07001477}
1478
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001479static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001480{
Christoph Lameter84e554e2009-12-18 16:26:23 -06001481 stat(s, CPUSLAB_FLUSH);
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001482 slab_lock(c->page);
1483 deactivate_slab(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001484}
1485
1486/*
1487 * Flush cpu slab.
Christoph Lameter6446faa2008-02-15 23:45:26 -08001488 *
Christoph Lameter81819f02007-05-06 14:49:36 -07001489 * Called from IPI handler with interrupts disabled.
1490 */
Christoph Lameter0c710012007-07-17 04:03:24 -07001491static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
Christoph Lameter81819f02007-05-06 14:49:36 -07001492{
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001493 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
Christoph Lameter81819f02007-05-06 14:49:36 -07001494
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001495 if (likely(c && c->page))
1496 flush_slab(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001497}
1498
1499static void flush_cpu_slab(void *d)
1500{
1501 struct kmem_cache *s = d;
Christoph Lameter81819f02007-05-06 14:49:36 -07001502
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001503 __flush_cpu_slab(s, smp_processor_id());
Christoph Lameter81819f02007-05-06 14:49:36 -07001504}
1505
1506static void flush_all(struct kmem_cache *s)
1507{
Jens Axboe15c8b6c2008-05-09 09:39:44 +02001508 on_each_cpu(flush_cpu_slab, s, 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07001509}
1510
1511/*
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001512 * Check if the objects in a per cpu structure fit numa
1513 * locality expectations.
1514 */
1515static inline int node_match(struct kmem_cache_cpu *c, int node)
1516{
1517#ifdef CONFIG_NUMA
1518 if (node != -1 && c->node != node)
1519 return 0;
1520#endif
1521 return 1;
1522}
1523
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001524static int count_free(struct page *page)
1525{
1526 return page->objects - page->inuse;
1527}
1528
1529static unsigned long count_partial(struct kmem_cache_node *n,
1530 int (*get_count)(struct page *))
1531{
1532 unsigned long flags;
1533 unsigned long x = 0;
1534 struct page *page;
1535
1536 spin_lock_irqsave(&n->list_lock, flags);
1537 list_for_each_entry(page, &n->partial, lru)
1538 x += get_count(page);
1539 spin_unlock_irqrestore(&n->list_lock, flags);
1540 return x;
1541}
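/*
 * Sketch (editor's addition; count_inuse_demo() is hypothetical):
 * count_partial() accepts any per-page metric, so the same locked walk
 * can report allocated objects instead of free slots, e.g. via
 * count_partial(n, count_inuse_demo).
 */
static int count_inuse_demo(struct page *page)
{
	return page->inuse;
}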
1542
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04001543static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
1544{
1545#ifdef CONFIG_SLUB_DEBUG
1546 return atomic_long_read(&n->total_objects);
1547#else
1548 return 0;
1549#endif
1550}
1551
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001552static noinline void
1553slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
1554{
1555 int node;
1556
1557 printk(KERN_WARNING
1558 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1559 nid, gfpflags);
1560 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
1561 "default order: %d, min order: %d\n", s->name, s->objsize,
1562 s->size, oo_order(s->oo), oo_order(s->min));
1563
David Rientjesfa5ec8a2009-07-07 00:14:14 -07001564 if (oo_order(s->min) > get_order(s->objsize))
1565 printk(KERN_WARNING " %s debugging increased min order, use "
1566 "slub_debug=O to disable.\n", s->name);
1567
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001568 for_each_online_node(node) {
1569 struct kmem_cache_node *n = get_node(s, node);
1570 unsigned long nr_slabs;
1571 unsigned long nr_objs;
1572 unsigned long nr_free;
1573
1574 if (!n)
1575 continue;
1576
Alexander Beregalov26c02cf2009-06-11 14:08:48 +04001577 nr_free = count_partial(n, count_free);
1578 nr_slabs = node_nr_slabs(n);
1579 nr_objs = node_nr_objs(n);
Pekka Enberg781b2ba2009-06-10 18:50:32 +03001580
1581 printk(KERN_WARNING
1582 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
1583 node, nr_slabs, nr_objs, nr_free);
1584 }
1585}
1586
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001587/*
Christoph Lameter894b8782007-05-10 03:15:16 -07001588 * Slow path. The lockless freelist is empty or we need to perform
1589 * debugging duties.
Christoph Lameter81819f02007-05-06 14:49:36 -07001590 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001591 * Interrupts are disabled.
Christoph Lameter81819f02007-05-06 14:49:36 -07001592 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001593 * Processing is still very fast if new objects have been freed to the
1594 * regular freelist. In that case we simply take over the regular freelist
1595 * as the lockless freelist and zap the regular freelist.
Christoph Lameter81819f02007-05-06 14:49:36 -07001596 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001597 * If that is not working then we fall back to the partial lists. We take the
1598 * first element of the freelist as the object to allocate now and move the
1599 * rest of the freelist to the lockless freelist.
1600 *
1601 * And if we were unable to get a new slab from the partial slab lists then
Christoph Lameter6446faa2008-02-15 23:45:26 -08001602 * we need to allocate a new slab. This is the slowest path since it involves
1603 * a call to the page allocator and the setup of a new slab.
Christoph Lameter81819f02007-05-06 14:49:36 -07001604 */
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001605static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1606 unsigned long addr, struct kmem_cache_cpu *c)
Christoph Lameter81819f02007-05-06 14:49:36 -07001607{
Christoph Lameter81819f02007-05-06 14:49:36 -07001608 void **object;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001609 struct page *new;
Christoph Lameter81819f02007-05-06 14:49:36 -07001610
Linus Torvaldse72e9c22008-03-27 20:56:33 -07001611 /* We handle __GFP_ZERO in the caller */
1612 gfpflags &= ~__GFP_ZERO;
1613
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001614 if (!c->page)
Christoph Lameter81819f02007-05-06 14:49:36 -07001615 goto new_slab;
1616
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001617 slab_lock(c->page);
1618 if (unlikely(!node_match(c, node)))
Christoph Lameter81819f02007-05-06 14:49:36 -07001619 goto another_slab;
Christoph Lameter6446faa2008-02-15 23:45:26 -08001620
Christoph Lameter84e554e2009-12-18 16:26:23 -06001621 stat(s, ALLOC_REFILL);
Christoph Lameter6446faa2008-02-15 23:45:26 -08001622
Christoph Lameter894b8782007-05-10 03:15:16 -07001623load_freelist:
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001624 object = c->page->freelist;
Christoph Lametera973e9d2008-03-01 13:40:44 -08001625 if (unlikely(!object))
Christoph Lameter81819f02007-05-06 14:49:36 -07001626 goto another_slab;
Andy Whitcroft8a380822008-07-23 21:27:18 -07001627 if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
Christoph Lameter81819f02007-05-06 14:49:36 -07001628 goto debug;
1629
Christoph Lameterff120592009-12-18 16:26:22 -06001630 c->freelist = get_freepointer(s, object);
Christoph Lameter39b26462008-04-14 19:11:30 +03001631 c->page->inuse = c->page->objects;
Christoph Lametera973e9d2008-03-01 13:40:44 -08001632 c->page->freelist = NULL;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001633 c->node = page_to_nid(c->page);
Christoph Lameter1f842602008-01-07 23:20:30 -08001634unlock_out:
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001635 slab_unlock(c->page);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001636 stat(s, ALLOC_SLOWPATH);
Christoph Lameter81819f02007-05-06 14:49:36 -07001637 return object;
1638
1639another_slab:
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001640 deactivate_slab(s, c);
Christoph Lameter81819f02007-05-06 14:49:36 -07001641
1642new_slab:
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001643 new = get_partial(s, gfpflags, node);
1644 if (new) {
1645 c->page = new;
Christoph Lameter84e554e2009-12-18 16:26:23 -06001646 stat(s, ALLOC_FROM_PARTIAL);
Christoph Lameter894b8782007-05-10 03:15:16 -07001647 goto load_freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07001648 }
1649
Christoph Lameterb811c202007-10-16 23:25:51 -07001650 if (gfpflags & __GFP_WAIT)
1651 local_irq_enable();
1652
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001653 new = new_slab(s, gfpflags, node);
Christoph Lameterb811c202007-10-16 23:25:51 -07001654
1655 if (gfpflags & __GFP_WAIT)
1656 local_irq_disable();
1657
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001658 if (new) {
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001659 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001660 stat(s, ALLOC_SLAB);
Christoph Lameter05aa3452007-11-05 11:31:58 -08001661 if (c->page)
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001662 flush_slab(s, c);
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001663 slab_lock(new);
Andy Whitcroft8a380822008-07-23 21:27:18 -07001664 __SetPageSlubFrozen(new);
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001665 c->page = new;
Christoph Lameter4b6f0752007-05-16 22:10:53 -07001666 goto load_freelist;
Christoph Lameter81819f02007-05-06 14:49:36 -07001667 }
Pekka Enberg95f85982009-06-11 16:18:09 +03001668 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
1669 slab_out_of_memory(s, gfpflags, node);
Christoph Lameter71c7a062008-02-14 14:28:01 -08001670 return NULL;
Christoph Lameter81819f02007-05-06 14:49:36 -07001671debug:
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001672 if (!alloc_debug_processing(s, c->page, object, addr))
Christoph Lameter81819f02007-05-06 14:49:36 -07001673 goto another_slab;
Christoph Lameter894b8782007-05-10 03:15:16 -07001674
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001675 c->page->inuse++;
Christoph Lameterff120592009-12-18 16:26:22 -06001676 c->page->freelist = get_freepointer(s, object);
Christoph Lameteree3c72a2007-10-16 01:26:07 -07001677 c->node = -1;
Christoph Lameter1f842602008-01-07 23:20:30 -08001678 goto unlock_out;
Christoph Lameter894b8782007-05-10 03:15:16 -07001679}
1680
1681/*
1682 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1683 * have the fastpath folded into their functions. So no function call
1684 * overhead for requests that can be satisfied on the fastpath.
1685 *
1686 * The fastpath works by first checking if the lockless freelist can be used.
1687 * If not then __slab_alloc is called for slow processing.
1688 *
1689 * Otherwise we can simply pick the next object from the lockless free list.
1690 */
Pekka Enberg06428782008-01-07 23:20:27 -08001691static __always_inline void *slab_alloc(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001692 gfp_t gfpflags, int node, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07001693{
Christoph Lameter894b8782007-05-10 03:15:16 -07001694 void **object;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001695 struct kmem_cache_cpu *c;
Christoph Lameter1f842602008-01-07 23:20:30 -08001696 unsigned long flags;
1697
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10001698 gfpflags &= gfp_allowed_mask;
Pekka Enberg7e85ee02009-06-12 14:03:06 +03001699
Nick Piggincf40bd12009-01-21 08:12:39 +01001700 lockdep_trace_alloc(gfpflags);
OGAWA Hirofumi89124d72008-11-19 21:23:59 +09001701 might_sleep_if(gfpflags & __GFP_WAIT);
Pekka Enberg3c506ef2008-12-29 11:47:05 +02001702
Akinobu Mita773ff602008-12-23 19:37:01 +09001703 if (should_failslab(s->objsize, gfpflags))
1704 return NULL;
1705
Christoph Lameter894b8782007-05-10 03:15:16 -07001706 local_irq_save(flags);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001707 c = __this_cpu_ptr(s->cpu_slab);
1708 object = c->freelist;
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001709 if (unlikely(!object || !node_match(c, node)))
Christoph Lameter894b8782007-05-10 03:15:16 -07001710
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001711 object = __slab_alloc(s, gfpflags, node, addr, c);
Christoph Lameter894b8782007-05-10 03:15:16 -07001712
1713 else {
Christoph Lameterff120592009-12-18 16:26:22 -06001714 c->freelist = get_freepointer(s, object);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001715 stat(s, ALLOC_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07001716 }
1717 local_irq_restore(flags);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07001718
Pekka Enberg74e21342009-11-25 20:14:48 +02001719 if (unlikely(gfpflags & __GFP_ZERO) && object)
Christoph Lameterff120592009-12-18 16:26:22 -06001720 memset(object, 0, s->objsize);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07001721
Christoph Lameterff120592009-12-18 16:26:22 -06001722 kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
1723 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
Vegard Nossum5a896d92008-04-04 00:54:48 +02001724
Christoph Lameter894b8782007-05-10 03:15:16 -07001725 return object;
Christoph Lameter81819f02007-05-06 14:49:36 -07001726}
1727
1728void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1729{
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001730 void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
1731
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02001732 trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001733
1734 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07001735}
1736EXPORT_SYMBOL(kmem_cache_alloc);
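/*
 * Usage sketch (editor's addition; struct foo_demo, foo_demo_cache and
 * foo_demo_init() are hypothetical): the client pattern the fastpath
 * above is built for -- create a cache once, then allocate and free
 * objects through it.
 */
struct foo_demo {
	int a;
	struct list_head list;
};

static struct kmem_cache *foo_demo_cache;

static int __init foo_demo_init(void)
{
	struct foo_demo *f;

	foo_demo_cache = kmem_cache_create("foo_demo", sizeof(struct foo_demo),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_demo_cache)
		return -ENOMEM;

	f = kmem_cache_alloc(foo_demo_cache, GFP_KERNEL);
	if (f)
		kmem_cache_free(foo_demo_cache, f);
	return 0;
}
/* module_init(foo_demo_init); */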
1737
Li Zefan0f24f122009-12-11 15:45:30 +08001738#ifdef CONFIG_TRACING
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001739void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
1740{
1741 return slab_alloc(s, gfpflags, -1, _RET_IP_);
1742}
1743EXPORT_SYMBOL(kmem_cache_alloc_notrace);
1744#endif
1745
Christoph Lameter81819f02007-05-06 14:49:36 -07001746#ifdef CONFIG_NUMA
1747void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1748{
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001749 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1750
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02001751 trace_kmem_cache_alloc_node(_RET_IP_, ret,
1752 s->objsize, s->size, gfpflags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001753
1754 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07001755}
1756EXPORT_SYMBOL(kmem_cache_alloc_node);
1757#endif
1758
Li Zefan0f24f122009-12-11 15:45:30 +08001759#ifdef CONFIG_TRACING
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001760void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1761 gfp_t gfpflags,
1762 int node)
1763{
1764 return slab_alloc(s, gfpflags, node, _RET_IP_);
1765}
1766EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1767#endif
1768
Christoph Lameter81819f02007-05-06 14:49:36 -07001769/*
Christoph Lameter894b8782007-05-10 03:15:16 -07001770 * Slow path handling. This may still be called frequently since objects
1771 * have a longer lifetime than the cpu slabs in most processing loads.
Christoph Lameter81819f02007-05-06 14:49:36 -07001772 *
Christoph Lameter894b8782007-05-10 03:15:16 -07001773 * So we still attempt to reduce cache line usage. Just take the slab
1774 * lock and free the item. If there is no additional partial page
1775 * handling required then we can return immediately.
Christoph Lameter81819f02007-05-06 14:49:36 -07001776 */
Christoph Lameter894b8782007-05-10 03:15:16 -07001777static void __slab_free(struct kmem_cache *s, struct page *page,
Christoph Lameterff120592009-12-18 16:26:22 -06001778 void *x, unsigned long addr)
Christoph Lameter81819f02007-05-06 14:49:36 -07001779{
1780 void *prior;
1781 void **object = (void *)x;
Christoph Lameter81819f02007-05-06 14:49:36 -07001782
Christoph Lameter84e554e2009-12-18 16:26:23 -06001783 stat(s, FREE_SLOWPATH);
Christoph Lameter81819f02007-05-06 14:49:36 -07001784 slab_lock(page);
1785
Andy Whitcroft8a380822008-07-23 21:27:18 -07001786 if (unlikely(SLABDEBUG && PageSlubDebug(page)))
Christoph Lameter81819f02007-05-06 14:49:36 -07001787 goto debug;
Christoph Lameter6446faa2008-02-15 23:45:26 -08001788
Christoph Lameter81819f02007-05-06 14:49:36 -07001789checks_ok:
Christoph Lameterff120592009-12-18 16:26:22 -06001790 prior = page->freelist;
1791 set_freepointer(s, object, prior);
Christoph Lameter81819f02007-05-06 14:49:36 -07001792 page->freelist = object;
1793 page->inuse--;
1794
Andy Whitcroft8a380822008-07-23 21:27:18 -07001795 if (unlikely(PageSlubFrozen(page))) {
Christoph Lameter84e554e2009-12-18 16:26:23 -06001796 stat(s, FREE_FROZEN);
Christoph Lameter81819f02007-05-06 14:49:36 -07001797 goto out_unlock;
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001798 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001799
1800 if (unlikely(!page->inuse))
1801 goto slab_empty;
1802
1803 /*
Christoph Lameter6446faa2008-02-15 23:45:26 -08001804 * Objects left in the slab. If it was not on the partial list before
Christoph Lameter81819f02007-05-06 14:49:36 -07001805 * then add it.
1806 */
Christoph Lametera973e9d2008-03-01 13:40:44 -08001807 if (unlikely(!prior)) {
Christoph Lameter7c2e1322008-01-07 23:20:27 -08001808 add_partial(get_node(s, page_to_nid(page)), page, 1);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001809 stat(s, FREE_ADD_PARTIAL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001810 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001811
1812out_unlock:
1813 slab_unlock(page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001814 return;
1815
1816slab_empty:
Christoph Lametera973e9d2008-03-01 13:40:44 -08001817 if (prior) {
Christoph Lameter81819f02007-05-06 14:49:36 -07001818 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07001819 * Slab still on the partial list.
Christoph Lameter81819f02007-05-06 14:49:36 -07001820 */
1821 remove_partial(s, page);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001822 stat(s, FREE_REMOVE_PARTIAL);
Christoph Lameter8ff12cf2008-02-07 17:47:41 -08001823 }
Christoph Lameter81819f02007-05-06 14:49:36 -07001824 slab_unlock(page);
Christoph Lameter84e554e2009-12-18 16:26:23 -06001825 stat(s, FREE_SLAB);
Christoph Lameter81819f02007-05-06 14:49:36 -07001826 discard_slab(s, page);
Christoph Lameter81819f02007-05-06 14:49:36 -07001827 return;
1828
1829debug:
Christoph Lameter3ec09742007-05-16 22:11:00 -07001830 if (!free_debug_processing(s, page, x, addr))
Christoph Lameter77c5e2d2007-05-06 14:49:42 -07001831 goto out_unlock;
Christoph Lameter77c5e2d2007-05-06 14:49:42 -07001832 goto checks_ok;
Christoph Lameter81819f02007-05-06 14:49:36 -07001833}
1834
Christoph Lameter894b8782007-05-10 03:15:16 -07001835/*
1836 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1837 * can perform fastpath freeing without additional function calls.
1838 *
1839 * The fastpath is only possible if we are freeing to the current cpu slab
1840 * of this processor. This is typically the case if we have just allocated
1841 * the item before.
1842 *
1843 * If fastpath is not possible then fall back to __slab_free where we deal
1844 * with all sorts of special processing.
1845 */
Pekka Enberg06428782008-01-07 23:20:27 -08001846static __always_inline void slab_free(struct kmem_cache *s,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001847 struct page *page, void *x, unsigned long addr)
Christoph Lameter894b8782007-05-10 03:15:16 -07001848{
1849 void **object = (void *)x;
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001850 struct kmem_cache_cpu *c;
Christoph Lameter1f842602008-01-07 23:20:30 -08001851 unsigned long flags;
1852
Catalin Marinas06f22f12009-06-11 13:23:18 +01001853 kmemleak_free_recursive(x, s->flags);
Christoph Lameter894b8782007-05-10 03:15:16 -07001854 local_irq_save(flags);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06001855 c = __this_cpu_ptr(s->cpu_slab);
Christoph Lameterff120592009-12-18 16:26:22 -06001856 kmemcheck_slab_free(s, object, s->objsize);
1857 debug_check_no_locks_freed(object, s->objsize);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07001858 if (!(s->flags & SLAB_DEBUG_OBJECTS))
Christoph Lameterff120592009-12-18 16:26:22 -06001859 debug_check_no_obj_freed(object, s->objsize);
Christoph Lameteree3c72a2007-10-16 01:26:07 -07001860 if (likely(page == c->page && c->node >= 0)) {
Christoph Lameterff120592009-12-18 16:26:22 -06001861 set_freepointer(s, object, c->freelist);
Christoph Lameterdfb4f092007-10-16 01:26:05 -07001862 c->freelist = object;
Christoph Lameter84e554e2009-12-18 16:26:23 -06001863 stat(s, FREE_FASTPATH);
Christoph Lameter894b8782007-05-10 03:15:16 -07001864 } else
Christoph Lameterff120592009-12-18 16:26:22 -06001865 __slab_free(s, page, x, addr);
Christoph Lameter894b8782007-05-10 03:15:16 -07001866
1867 local_irq_restore(flags);
1868}
1869
Christoph Lameter81819f02007-05-06 14:49:36 -07001870void kmem_cache_free(struct kmem_cache *s, void *x)
1871{
Christoph Lameter77c5e2d2007-05-06 14:49:42 -07001872 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07001873
Christoph Lameterb49af682007-05-06 14:49:41 -07001874 page = virt_to_head_page(x);
Christoph Lameter81819f02007-05-06 14:49:36 -07001875
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03001876 slab_free(s, page, x, _RET_IP_);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03001877
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02001878 trace_kmem_cache_free(_RET_IP_, x);
Christoph Lameter81819f02007-05-06 14:49:36 -07001879}
1880EXPORT_SYMBOL(kmem_cache_free);
1881
Cyrill Gorcunove9beef12008-10-28 22:02:26 +03001882/* Figure out on which slab page the object resides */
Christoph Lameter81819f02007-05-06 14:49:36 -07001883static struct page *get_object_page(const void *x)
1884{
Christoph Lameterb49af682007-05-06 14:49:41 -07001885 struct page *page = virt_to_head_page(x);
Christoph Lameter81819f02007-05-06 14:49:36 -07001886
1887 if (!PageSlab(page))
1888 return NULL;
1889
1890 return page;
1891}
1892
1893/*
Christoph Lameter672bba32007-05-09 02:32:39 -07001894 * Object placement in a slab is made very easy because we always start at
1895 * offset 0. If we tune the size of the object to the alignment then we can
1896 * get the required alignment by putting one properly sized object after
1897 * another.
Christoph Lameter81819f02007-05-06 14:49:36 -07001898 *
1899 * Notice that the allocation order determines the sizes of the per cpu
1900 * caches. Each processor has always one slab available for allocations.
1901 * Increasing the allocation order reduces the number of times that slabs
Christoph Lameter672bba32007-05-09 02:32:39 -07001902 * must be moved on and off the partial lists and is therefore a factor in
Christoph Lameter81819f02007-05-06 14:49:36 -07001903 * locking overhead.
Christoph Lameter81819f02007-05-06 14:49:36 -07001904 */
1905
1906/*
1907 * Minimum / Maximum order of slab pages. This influences locking overhead
1908 * and slab fragmentation. A higher order reduces the number of partial slabs
1909 * and increases the number of allocations possible without having to
1910 * take the list_lock.
1911 */
1912static int slub_min_order;
Christoph Lameter114e9e82008-04-14 19:11:41 +03001913static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03001914static int slub_min_objects;
Christoph Lameter81819f02007-05-06 14:49:36 -07001915
1916/*
1917 * Merge control. If this is set then no merging of slab caches will occur.
Christoph Lameter672bba32007-05-09 02:32:39 -07001918 * (Could be removed. This was introduced to pacify the merge skeptics.)
Christoph Lameter81819f02007-05-06 14:49:36 -07001919 */
1920static int slub_nomerge;
1921
1922/*
Christoph Lameter81819f02007-05-06 14:49:36 -07001923 * Calculate the order of allocation given a slab object size.
1924 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001925 * The order of allocation has significant impact on performance and other
1926 * system components. Generally order 0 allocations should be preferred since
1927 * order 0 does not cause fragmentation in the page allocator. Larger objects
1928 * can be problematic to put into order 0 slabs because there may be too much
Christoph Lameterc124f5b2008-04-14 19:13:29 +03001929 * unused space left. We go to a higher order if more than 1/16th of the slab
Christoph Lameter672bba32007-05-09 02:32:39 -07001930 * would be wasted.
Christoph Lameter81819f02007-05-06 14:49:36 -07001931 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001932 * In order to reach satisfactory performance we must ensure that a minimum
1933 * number of objects is in one slab. Otherwise we may generate too much
1934 * activity on the partial lists which requires taking the list_lock. This is
1935 * less a concern for large slabs though which are rarely used.
Christoph Lameter81819f02007-05-06 14:49:36 -07001936 *
Christoph Lameter672bba32007-05-09 02:32:39 -07001937 * slub_max_order specifies the order at which we stop treating the
1938 * number of objects in a slab as critical. If we reach slub_max_order then
1939 * we try to keep the page order as low as possible. So we accept more waste
1940 * of space in favor of a small page order.
1941 *
1942 * Higher order allocations also allow the placement of more objects in a
1943 * slab and thereby reduce object handling overhead. If the user has
1944 * requested a higher minimum order then we start with that one instead of
1945 * the smallest order which will fit the object.
Christoph Lameter81819f02007-05-06 14:49:36 -07001946 */
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001947static inline int slab_order(int size, int min_objects,
1948 int max_order, int fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07001949{
1950 int order;
1951 int rem;
Christoph Lameter6300ea72007-07-17 04:03:20 -07001952 int min_order = slub_min_order;
Christoph Lameter81819f02007-05-06 14:49:36 -07001953
Cyrill Gorcunov210b5c02008-10-22 23:00:38 +04001954 if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
1955 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
Christoph Lameter39b26462008-04-14 19:11:30 +03001956
Christoph Lameter6300ea72007-07-17 04:03:20 -07001957 for (order = max(min_order,
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001958 fls(min_objects * size - 1) - PAGE_SHIFT);
1959 order <= max_order; order++) {
1960
Christoph Lameter81819f02007-05-06 14:49:36 -07001961 unsigned long slab_size = PAGE_SIZE << order;
1962
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001963 if (slab_size < min_objects * size)
Christoph Lameter81819f02007-05-06 14:49:36 -07001964 continue;
1965
Christoph Lameter81819f02007-05-06 14:49:36 -07001966 rem = slab_size % size;
1967
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001968 if (rem <= slab_size / fract_leftover)
Christoph Lameter81819f02007-05-06 14:49:36 -07001969 break;
1970
1971 }
Christoph Lameter672bba32007-05-09 02:32:39 -07001972
Christoph Lameter81819f02007-05-06 14:49:36 -07001973 return order;
1974}
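/*
 * Worked example (editor's note, figures assumed): size = 704 with 4K
 * pages, min_objects = 5 and fract_leftover = 16. At order 0 five
 * objects fit and rem = 576 > 4096 / 16 = 256, so the loop continues;
 * at order 1 eleven objects fit and rem = 448 <= 8192 / 16 = 512, so
 * order 1 is returned.
 */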
1975
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001976static inline int calculate_order(int size)
1977{
1978 int order;
1979 int min_objects;
1980 int fraction;
Zhang Yanmine8120ff2009-02-12 18:00:17 +02001981 int max_objects;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001982
1983 /*
1984 * Attempt to find the best configuration for a slab. This
1985 * works by first attempting to generate a layout with
1986 * the best configuration and backing off gradually.
1987 *
1988 * First we reduce the acceptable waste in a slab. Then
1989 * we reduce the minimum objects required in a slab.
1990 */
1991 min_objects = slub_min_objects;
Christoph Lameter9b2cd502008-04-14 19:11:41 +03001992 if (!min_objects)
1993 min_objects = 4 * (fls(nr_cpu_ids) + 1);
Zhang Yanmine8120ff2009-02-12 18:00:17 +02001994 max_objects = (PAGE_SIZE << slub_max_order)/size;
1995 min_objects = min(min_objects, max_objects);
1996
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001997 while (min_objects > 1) {
Christoph Lameterc124f5b2008-04-14 19:13:29 +03001998 fraction = 16;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07001999 while (fraction >= 4) {
2000 order = slab_order(size, min_objects,
2001 slub_max_order, fraction);
2002 if (order <= slub_max_order)
2003 return order;
2004 fraction /= 2;
2005 }
Amerigo Wang5086c382009-08-19 21:44:13 +03002006 min_objects--;
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002007 }
2008
2009 /*
2010 * We were unable to place multiple objects in a slab. Now
2011 * let's see if we can place a single object there.
2012 */
2013 order = slab_order(size, 1, slub_max_order, 1);
2014 if (order <= slub_max_order)
2015 return order;
2016
2017 /*
2018 * Doh, this slab cannot be placed using slub_max_order.
2019 */
2020 order = slab_order(size, 1, MAX_ORDER, 1);
David Rientjes818cf592009-04-23 09:58:22 +03002021 if (order < MAX_ORDER)
Christoph Lameter5e6d4442007-05-09 02:32:46 -07002022 return order;
2023 return -ENOSYS;
2024}
2025
Christoph Lameter81819f02007-05-06 14:49:36 -07002026/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002027 * Figure out what the alignment of the objects will be.
Christoph Lameter81819f02007-05-06 14:49:36 -07002028 */
2029static unsigned long calculate_alignment(unsigned long flags,
2030 unsigned long align, unsigned long size)
2031{
2032 /*
Christoph Lameter6446faa2008-02-15 23:45:26 -08002033 * If the user wants hardware cache aligned objects then follow that
2034 * suggestion if the object is sufficiently large.
Christoph Lameter81819f02007-05-06 14:49:36 -07002035 *
Christoph Lameter6446faa2008-02-15 23:45:26 -08002036 * The hardware cache alignment cannot override the specified
2037 * alignment though. If that is greater, then use it.
Christoph Lameter81819f02007-05-06 14:49:36 -07002038 */
Nick Pigginb6210382008-03-05 14:05:56 -08002039 if (flags & SLAB_HWCACHE_ALIGN) {
2040 unsigned long ralign = cache_line_size();
2041 while (size <= ralign / 2)
2042 ralign /= 2;
2043 align = max(align, ralign);
2044 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002045
2046 if (align < ARCH_SLAB_MINALIGN)
Nick Pigginb6210382008-03-05 14:05:56 -08002047 align = ARCH_SLAB_MINALIGN;
Christoph Lameter81819f02007-05-06 14:49:36 -07002048
2049 return ALIGN(align, sizeof(void *));
2050}
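/*
 * Worked example (editor's note, a 64-byte cache line assumed): for a
 * 24-byte object with SLAB_HWCACHE_ALIGN, ralign is halved from 64 to
 * 32 (24 <= 32, but 24 > 16), so the object is aligned to 32 bytes and
 * two objects share each cache line instead of each padding out a full
 * one.
 */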
2051
Pekka Enberg5595cff2008-08-05 09:28:47 +03002052static void
2053init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002054{
2055 n->nr_partial = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07002056 spin_lock_init(&n->list_lock);
2057 INIT_LIST_HEAD(&n->partial);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002058#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter0f389ec2008-04-14 18:53:02 +03002059 atomic_long_set(&n->nr_slabs, 0);
Salman Qazi02b71b72008-09-11 12:25:41 -07002060 atomic_long_set(&n->total_objects, 0);
Christoph Lameter643b1132007-05-06 14:49:42 -07002061 INIT_LIST_HEAD(&n->full);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002062#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002063}
2064
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002065static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002066
2067static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2068{
Christoph Lameter756dee72009-12-18 16:26:21 -06002069 if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002070 /*
2071 * Boot time creation of the kmalloc array. Use static per cpu data
2072 * since the per cpu allocator is not available yet.
2073 */
2074 s->cpu_slab = per_cpu_var(kmalloc_percpu) + (s - kmalloc_caches);
2075 else
2076 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2077
2078 if (!s->cpu_slab)
2079 return 0;
2080
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002081 return 1;
2082}
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002083
Christoph Lameter81819f02007-05-06 14:49:36 -07002084#ifdef CONFIG_NUMA
2085/*
2086 * No kmalloc_node yet so do it by hand. We know that this is the first
2087 * slab on the node for this slabcache. There are no concurrent accesses
2088 * possible.
2089 *
2090 * Note that this function only works on the kmalloc_node_cache
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002091 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2092 * memory on a fresh node that has no slab structures yet.
Christoph Lameter81819f02007-05-06 14:49:36 -07002093 */
David Rientjes0094de92008-11-25 19:14:19 -08002094static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
Christoph Lameter81819f02007-05-06 14:49:36 -07002095{
2096 struct page *page;
2097 struct kmem_cache_node *n;
rootba84c732008-01-07 23:20:28 -08002098 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07002099
2100 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2101
Christoph Lametera2f92ee2007-08-22 14:01:57 -07002102 page = new_slab(kmalloc_caches, gfpflags, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002103
2104 BUG_ON(!page);
Christoph Lametera2f92ee2007-08-22 14:01:57 -07002105 if (page_to_nid(page) != node) {
2106 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2107 "node %d\n", node);
2108 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2109 "in order to be able to continue\n");
2110 }
2111
Christoph Lameter81819f02007-05-06 14:49:36 -07002112 n = page->freelist;
2113 BUG_ON(!n);
2114 page->freelist = get_freepointer(kmalloc_caches, n);
2115 page->inuse++;
2116 kmalloc_caches->node[node] = n;
Christoph Lameter8ab13722007-07-17 04:03:32 -07002117#ifdef CONFIG_SLUB_DEBUG
Christoph Lameterd45f39c2007-07-17 04:03:21 -07002118 init_object(kmalloc_caches, n, 1);
2119 init_tracking(kmalloc_caches, n);
Christoph Lameter8ab13722007-07-17 04:03:32 -07002120#endif
Pekka Enberg5595cff2008-08-05 09:28:47 +03002121 init_kmem_cache_node(n, kmalloc_caches);
Christoph Lameter205ab992008-04-14 19:11:40 +03002122 inc_slabs_node(kmalloc_caches, node, page->objects);
Christoph Lameter6446faa2008-02-15 23:45:26 -08002123
rootba84c732008-01-07 23:20:28 -08002124 /*
2125 * lockdep requires consistent irq usage for each lock
2126 * so even though there cannot be a race this early in
2127 * the boot sequence, we still disable irqs.
2128 */
2129 local_irq_save(flags);
Christoph Lameter7c2e1322008-01-07 23:20:27 -08002130 add_partial(n, page, 0);
rootba84c732008-01-07 23:20:28 -08002131 local_irq_restore(flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002132}
2133
2134static void free_kmem_cache_nodes(struct kmem_cache *s)
2135{
2136 int node;
2137
Christoph Lameterf64dc582007-10-16 01:25:33 -07002138 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002139 struct kmem_cache_node *n = s->node[node];
2140 if (n && n != &s->local_node)
2141 kmem_cache_free(kmalloc_caches, n);
2142 s->node[node] = NULL;
2143 }
2144}
2145
2146static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2147{
2148 int node;
2149 int local_node;
2150
2151 if (slab_state >= UP)
2152 local_node = page_to_nid(virt_to_page(s));
2153 else
2154 local_node = 0;
2155
Christoph Lameterf64dc582007-10-16 01:25:33 -07002156 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002157 struct kmem_cache_node *n;
2158
2159 if (local_node == node)
2160 n = &s->local_node;
2161 else {
2162 if (slab_state == DOWN) {
David Rientjes0094de92008-11-25 19:14:19 -08002163 early_kmem_cache_node_alloc(gfpflags, node);
Christoph Lameter81819f02007-05-06 14:49:36 -07002164 continue;
2165 }
2166 n = kmem_cache_alloc_node(kmalloc_caches,
2167 gfpflags, node);
2168
2169 if (!n) {
2170 free_kmem_cache_nodes(s);
2171 return 0;
2172 }
2173
2174 }
2175 s->node[node] = n;
Pekka Enberg5595cff2008-08-05 09:28:47 +03002176 init_kmem_cache_node(n, s);
Christoph Lameter81819f02007-05-06 14:49:36 -07002177 }
2178 return 1;
2179}
2180#else
2181static void free_kmem_cache_nodes(struct kmem_cache *s)
2182{
2183}
2184
2185static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2186{
Pekka Enberg5595cff2008-08-05 09:28:47 +03002187 init_kmem_cache_node(&s->local_node, s);
Christoph Lameter81819f02007-05-06 14:49:36 -07002188 return 1;
2189}
2190#endif
2191
David Rientjesc0bdb232009-02-25 09:16:35 +02002192static void set_min_partial(struct kmem_cache *s, unsigned long min)
David Rientjes3b89d7d2009-02-22 17:40:07 -08002193{
2194 if (min < MIN_PARTIAL)
2195 min = MIN_PARTIAL;
2196 else if (min > MAX_PARTIAL)
2197 min = MAX_PARTIAL;
2198 s->min_partial = min;
2199}
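/*
 * Worked example (editor's note, constants as defined in this tree):
 * kmem_cache_open() below passes ilog2(s->size), so a 4096-byte object
 * requests 12 partial slabs and is clamped to MAX_PARTIAL (10), while a
 * 32-byte object requests 5, which is already the MIN_PARTIAL floor.
 */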
2200
Christoph Lameter81819f02007-05-06 14:49:36 -07002201/*
2202 * calculate_sizes() determines the order and the distribution of data within
2203 * a slab object.
2204 */
Christoph Lameter06b285d2008-04-14 19:11:41 +03002205static int calculate_sizes(struct kmem_cache *s, int forced_order)
Christoph Lameter81819f02007-05-06 14:49:36 -07002206{
2207 unsigned long flags = s->flags;
2208 unsigned long size = s->objsize;
2209 unsigned long align = s->align;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002210 int order;
Christoph Lameter81819f02007-05-06 14:49:36 -07002211
2212 /*
Christoph Lameterd8b42bf2008-02-15 23:45:25 -08002213 * Round up object size to the next word boundary. We can only
2214 * place the free pointer at word boundaries and this determines
2215 * the possible location of the free pointer.
2216 */
2217 size = ALIGN(size, sizeof(void *));
2218
2219#ifdef CONFIG_SLUB_DEBUG
2220 /*
Christoph Lameter81819f02007-05-06 14:49:36 -07002221 * Determine if we can poison the object itself. If the user of
2222 * the slab may touch the object after free or before allocation
2223 * then we should never poison the object itself.
2224 */
2225 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
Christoph Lameterc59def9f2007-05-16 22:10:50 -07002226 !s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07002227 s->flags |= __OBJECT_POISON;
2228 else
2229 s->flags &= ~__OBJECT_POISON;
2230
Christoph Lameter81819f02007-05-06 14:49:36 -07002231
2232 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002233 * If we are Redzoning then check if there is some space between the
Christoph Lameter81819f02007-05-06 14:49:36 -07002234 * end of the object and the free pointer. If not then add an
Christoph Lameter672bba32007-05-09 02:32:39 -07002235 * additional word to have some bytes to store Redzone information.
Christoph Lameter81819f02007-05-06 14:49:36 -07002236 */
2237 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2238 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002239#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002240
2241 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002242 * With that we have determined the number of bytes in actual use
2243 * by the object. This is the potential offset to the free pointer.
Christoph Lameter81819f02007-05-06 14:49:36 -07002244 */
2245 s->inuse = size;
2246
2247 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
Christoph Lameterc59def9f2007-05-16 22:10:50 -07002248 s->ctor)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002249 /*
2250 * Relocate free pointer after the object if it is not
2251 * permitted to overwrite the first word of the object on
2252 * kmem_cache_free.
2253 *
2254 * This is the case if we use RCU, have a constructor, or
2255 * are poisoning the objects.
2256 */
2257 s->offset = size;
2258 size += sizeof(void *);
2259 }
2260
Christoph Lameterc12b3c62007-05-23 13:57:31 -07002261#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07002262 if (flags & SLAB_STORE_USER)
2263 /*
2264 * Need to store information about allocs and frees after
2265 * the object.
2266 */
2267 size += 2 * sizeof(struct track);
2268
Christoph Lameterbe7b3fb2007-05-09 02:32:36 -07002269 if (flags & SLAB_RED_ZONE)
Christoph Lameter81819f02007-05-06 14:49:36 -07002270 /*
2271 * Add some empty padding so that we can catch
2272 * overwrites from earlier objects rather than let
2273 * tracking information or the free pointer be
Frederik Schwarzer0211a9c2008-12-29 22:14:56 +01002274 * corrupted if a user writes before the start
Christoph Lameter81819f02007-05-06 14:49:36 -07002275 * of the object.
2276 */
2277 size += sizeof(void *);
Christoph Lameter41ecc552007-05-09 02:32:44 -07002278#endif
Christoph Lameter672bba32007-05-09 02:32:39 -07002279
Christoph Lameter81819f02007-05-06 14:49:36 -07002280 /*
2281 * Determine the alignment based on various parameters that the
Christoph Lameter65c02d42007-05-09 02:32:35 -07002282 * user specified and the dynamic determination of cache line size
2283 * on bootup.
Christoph Lameter81819f02007-05-06 14:49:36 -07002284 */
2285 align = calculate_alignment(flags, align, s->objsize);
Zhang, Yanmindcb0ce12009-07-30 11:28:11 +08002286 s->align = align;
Christoph Lameter81819f02007-05-06 14:49:36 -07002287
2288 /*
2289 * SLUB stores one object immediately after another beginning from
2290 * offset 0. In order to align the objects we have to simply size
2291 * each object to conform to the alignment.
2292 */
2293 size = ALIGN(size, align);
2294 s->size = size;
Christoph Lameter06b285d2008-04-14 19:11:41 +03002295 if (forced_order >= 0)
2296 order = forced_order;
2297 else
2298 order = calculate_order(size);
Christoph Lameter81819f02007-05-06 14:49:36 -07002299
Christoph Lameter834f3d12008-04-14 19:11:31 +03002300 if (order < 0)
Christoph Lameter81819f02007-05-06 14:49:36 -07002301 return 0;
2302
Christoph Lameterb7a49f02008-02-14 14:21:32 -08002303 s->allocflags = 0;
Christoph Lameter834f3d12008-04-14 19:11:31 +03002304 if (order)
Christoph Lameterb7a49f02008-02-14 14:21:32 -08002305 s->allocflags |= __GFP_COMP;
2306
2307 if (s->flags & SLAB_CACHE_DMA)
2308 s->allocflags |= SLUB_DMA;
2309
2310 if (s->flags & SLAB_RECLAIM_ACCOUNT)
2311 s->allocflags |= __GFP_RECLAIMABLE;
2312
Christoph Lameter81819f02007-05-06 14:49:36 -07002313 /*
2314 * Determine the number of objects per slab
2315 */
Christoph Lameter834f3d12008-04-14 19:11:31 +03002316 s->oo = oo_make(order, size);
Christoph Lameter65c33762008-04-14 19:11:40 +03002317 s->min = oo_make(get_order(size), size);
Christoph Lameter205ab992008-04-14 19:11:40 +03002318 if (oo_objects(s->oo) > oo_objects(s->max))
2319 s->max = s->oo;
Christoph Lameter81819f02007-05-06 14:49:36 -07002320
Christoph Lameter834f3d12008-04-14 19:11:31 +03002321 return !!oo_objects(s->oo);
Christoph Lameter81819f02007-05-06 14:49:36 -07002322
2323}
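/*
 * Layout sketch (editor's addition) for a debug cache with
 * SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER and a constructor,
 * i.e. a relocated free pointer, as computed above:
 *
 *	object payload		(objsize, word aligned)
 *	red zone word
 *	free pointer		(s->offset points here)
 *	2 x struct track	(alloc and free callers)
 *	padding word		(absorbs writes that stray before the
 *				 following object's start)
 *
 * The sum is then rounded up to the computed alignment to yield s->size.
 */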
2324
Christoph Lameter81819f02007-05-06 14:49:36 -07002325static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2326 const char *name, size_t size,
2327 size_t align, unsigned long flags,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002328 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07002329{
2330 memset(s, 0, kmem_size);
2331 s->name = name;
2332 s->ctor = ctor;
Christoph Lameter81819f02007-05-06 14:49:36 -07002333 s->objsize = size;
Christoph Lameter81819f02007-05-06 14:49:36 -07002334 s->align = align;
Christoph Lameterba0268a2007-09-11 15:24:11 -07002335 s->flags = kmem_cache_flags(size, flags, name, ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07002336
Christoph Lameter06b285d2008-04-14 19:11:41 +03002337 if (!calculate_sizes(s, -1))
Christoph Lameter81819f02007-05-06 14:49:36 -07002338 goto error;
David Rientjes3de47212009-07-27 18:30:35 -07002339 if (disable_higher_order_debug) {
2340 /*
2341 * Disable debugging flags that store metadata if the min slab
2342 * order increased.
2343 */
2344 if (get_order(s->size) > get_order(s->objsize)) {
2345 s->flags &= ~DEBUG_METADATA_FLAGS;
2346 s->offset = 0;
2347 if (!calculate_sizes(s, -1))
2348 goto error;
2349 }
2350 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002351
David Rientjes3b89d7d2009-02-22 17:40:07 -08002352 /*
2353 * The larger the object size is, the more pages we want on the partial
2354 * list to avoid pounding the page allocator excessively.
2355 */
David Rientjesc0bdb232009-02-25 09:16:35 +02002356 set_min_partial(s, ilog2(s->size));
Christoph Lameter81819f02007-05-06 14:49:36 -07002357 s->refcount = 1;
2358#ifdef CONFIG_NUMA
Christoph Lametere2cb96b2008-08-19 08:51:22 -05002359 s->remote_node_defrag_ratio = 1000;
Christoph Lameter81819f02007-05-06 14:49:36 -07002360#endif
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002361 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2362 goto error;
Christoph Lameter81819f02007-05-06 14:49:36 -07002363
Christoph Lameterdfb4f092007-10-16 01:26:05 -07002364 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
Christoph Lameter81819f02007-05-06 14:49:36 -07002365 return 1;
Christoph Lameterff120592009-12-18 16:26:22 -06002366
Christoph Lameter4c93c3552007-10-16 01:26:08 -07002367 free_kmem_cache_nodes(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07002368error:
2369 if (flags & SLAB_PANIC)
2370 panic("Cannot create slab %s size=%lu realsize=%u "
2371 "order=%u offset=%u flags=%lx\n",
Christoph Lameter834f3d12008-04-14 19:11:31 +03002372 s->name, (unsigned long)size, s->size, oo_order(s->oo),
Christoph Lameter81819f02007-05-06 14:49:36 -07002373 s->offset, flags);
2374 return 0;
2375}
Christoph Lameter81819f02007-05-06 14:49:36 -07002376
2377/*
2378 * Check if a given pointer is valid
2379 */
2380int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2381{
Pekka Enberg06428782008-01-07 23:20:27 -08002382 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07002383
2384 page = get_object_page(object);
2385
2386 if (!page || s != page->slab)
2387 /* No slab or wrong slab */
2388 return 0;
2389
Christoph Lameterabcd08a2007-05-09 02:32:37 -07002390 if (!check_valid_pointer(s, page, object))
Christoph Lameter81819f02007-05-06 14:49:36 -07002391 return 0;
2392
2393 /*
2394 * We could also check if the object is on the slab's freelist.
2395 * But this would be too expensive and it seems that the main
Christoph Lameter6446faa2008-02-15 23:45:26 -08002396 * purpose of kmem_ptr_validate() is to check if the object belongs
Christoph Lameter81819f02007-05-06 14:49:36 -07002397 * to a certain slab.
2398 */
2399 return 1;
2400}
2401EXPORT_SYMBOL(kmem_ptr_validate);
2402
2403/*
2404 * Determine the size of a slab object
2405 */
2406unsigned int kmem_cache_size(struct kmem_cache *s)
2407{
2408 return s->objsize;
2409}
2410EXPORT_SYMBOL(kmem_cache_size);
2411
2412const char *kmem_cache_name(struct kmem_cache *s)
2413{
2414 return s->name;
2415}
2416EXPORT_SYMBOL(kmem_cache_name);
2417
Christoph Lameter33b12c32008-04-25 12:22:43 -07002418static void list_slab_objects(struct kmem_cache *s, struct page *page,
2419 const char *text)
Christoph Lameter81819f02007-05-06 14:49:36 -07002420{
Christoph Lameter33b12c32008-04-25 12:22:43 -07002421#ifdef CONFIG_SLUB_DEBUG
2422 void *addr = page_address(page);
2423 void *p;
2424 DECLARE_BITMAP(map, page->objects);
2425
2426 bitmap_zero(map, page->objects);
2427 slab_err(s, page, "%s", text);
2428 slab_lock(page);
2429 for_each_free_object(p, s, page->freelist)
2430 set_bit(slab_index(p, s, addr), map);
2431
2432 for_each_object(p, s, addr, page->objects) {
2433
2434 if (!test_bit(slab_index(p, s, addr), map)) {
2435 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2436 p, p - addr);
2437 print_tracking(s, p);
2438 }
2439 }
2440 slab_unlock(page);
2441#endif
2442}
2443
Christoph Lameter81819f02007-05-06 14:49:36 -07002444/*
Christoph Lameter599870b2008-04-23 12:36:52 -07002445 * Attempt to free all partial slabs on a node.
Christoph Lameter81819f02007-05-06 14:49:36 -07002446 */
Christoph Lameter599870b2008-04-23 12:36:52 -07002447static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
Christoph Lameter81819f02007-05-06 14:49:36 -07002448{
Christoph Lameter81819f02007-05-06 14:49:36 -07002449 unsigned long flags;
2450 struct page *page, *h;
2451
2452 spin_lock_irqsave(&n->list_lock, flags);
Christoph Lameter33b12c32008-04-25 12:22:43 -07002453 list_for_each_entry_safe(page, h, &n->partial, lru) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002454 if (!page->inuse) {
2455 list_del(&page->lru);
2456 discard_slab(s, page);
Christoph Lameter599870b2008-04-23 12:36:52 -07002457 n->nr_partial--;
Christoph Lameter33b12c32008-04-25 12:22:43 -07002458 } else {
2459 list_slab_objects(s, page,
2460 "Objects remaining on kmem_cache_close()");
Christoph Lameter599870b2008-04-23 12:36:52 -07002461 }
Christoph Lameter33b12c32008-04-25 12:22:43 -07002462 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002463 spin_unlock_irqrestore(&n->list_lock, flags);
Christoph Lameter81819f02007-05-06 14:49:36 -07002464}
2465
2466/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002467 * Release all resources used by a slab cache.
Christoph Lameter81819f02007-05-06 14:49:36 -07002468 */
Christoph Lameter0c710012007-07-17 04:03:24 -07002469static inline int kmem_cache_close(struct kmem_cache *s)
Christoph Lameter81819f02007-05-06 14:49:36 -07002470{
2471 int node;
2472
2473 flush_all(s);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002474 free_percpu(s->cpu_slab);
Christoph Lameter81819f02007-05-06 14:49:36 -07002475 /* Attempt to free all objects */
Christoph Lameterf64dc582007-10-16 01:25:33 -07002476 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter81819f02007-05-06 14:49:36 -07002477 struct kmem_cache_node *n = get_node(s, node);
2478
Christoph Lameter599870b2008-04-23 12:36:52 -07002479 free_partial(s, n);
2480 if (n->nr_partial || slabs_node(s, node))
Christoph Lameter81819f02007-05-06 14:49:36 -07002481 return 1;
2482 }
2483 free_kmem_cache_nodes(s);
2484 return 0;
2485}
2486
2487/*
2488 * Close a cache and release the kmem_cache structure
2489 * (must be used for caches created using kmem_cache_create)
2490 */
2491void kmem_cache_destroy(struct kmem_cache *s)
2492{
2493 down_write(&slub_lock);
2494 s->refcount--;
2495 if (!s->refcount) {
2496 list_del(&s->list);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07002497 up_write(&slub_lock);
Pekka Enbergd629d812008-04-23 22:31:08 +03002498 if (kmem_cache_close(s)) {
2499 printk(KERN_ERR "SLUB %s: %s called for cache that "
2500 "still has objects.\n", s->name, __func__);
2501 dump_stack();
2502 }
Eric Dumazetd76b1592009-09-03 22:38:59 +03002503 if (s->flags & SLAB_DESTROY_BY_RCU)
2504 rcu_barrier();
Christoph Lameter81819f02007-05-06 14:49:36 -07002505 sysfs_slab_remove(s);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07002506 } else
2507 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07002508}
2509EXPORT_SYMBOL(kmem_cache_destroy);
2510
2511/********************************************************************
2512 * Kmalloc subsystem
2513 *******************************************************************/
2514
Christoph Lameter756dee72009-12-18 16:26:21 -06002515struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
Christoph Lameter81819f02007-05-06 14:49:36 -07002516EXPORT_SYMBOL(kmalloc_caches);
2517
Christoph Lameter81819f02007-05-06 14:49:36 -07002518static int __init setup_slub_min_order(char *str)
2519{
Pekka Enberg06428782008-01-07 23:20:27 -08002520 get_option(&str, &slub_min_order);
Christoph Lameter81819f02007-05-06 14:49:36 -07002521
2522 return 1;
2523}
2524
2525__setup("slub_min_order=", setup_slub_min_order);
2526
2527static int __init setup_slub_max_order(char *str)
2528{
Pekka Enberg06428782008-01-07 23:20:27 -08002529 get_option(&str, &slub_max_order);
David Rientjes818cf592009-04-23 09:58:22 +03002530 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07002531
2532 return 1;
2533}
2534
2535__setup("slub_max_order=", setup_slub_max_order);
2536
2537static int __init setup_slub_min_objects(char *str)
2538{
Pekka Enberg06428782008-01-07 23:20:27 -08002539 get_option(&str, &slub_min_objects);
Christoph Lameter81819f02007-05-06 14:49:36 -07002540
2541 return 1;
2542}
2543
2544__setup("slub_min_objects=", setup_slub_min_objects);
2545
2546static int __init setup_slub_nomerge(char *str)
2547{
2548 slub_nomerge = 1;
2549 return 1;
2550}
2551
2552__setup("slub_nomerge", setup_slub_nomerge);
2553
Christoph Lameter81819f02007-05-06 14:49:36 -07002554static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2555 const char *name, int size, gfp_t gfp_flags)
2556{
2557 unsigned int flags = 0;
2558
2559 if (gfp_flags & SLUB_DMA)
2560 flags = SLAB_CACHE_DMA;
2561
Pekka Enberg83b519e2009-06-10 19:40:04 +03002562 /*
2563 * This function is called with IRQs disabled during early boot on
2564 * a single CPU, so there is no need to take slub_lock here.
2565 */
Christoph Lameter81819f02007-05-06 14:49:36 -07002566 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
Christoph Lameter319d1e22008-04-14 19:11:41 +03002567 flags, NULL))
Christoph Lameter81819f02007-05-06 14:49:36 -07002568 goto panic;
2569
2570 list_add(&s->list, &slab_caches);
Pekka Enberg83b519e2009-06-10 19:40:04 +03002571
Christoph Lameter81819f02007-05-06 14:49:36 -07002572 if (sysfs_slab_add(s))
2573 goto panic;
2574 return s;
2575
2576panic:
2577 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2578}
2579
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002580#ifdef CONFIG_ZONE_DMA
Christoph Lameterffadd4d2009-02-17 12:05:07 -05002581static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
Christoph Lameter1ceef402007-08-07 15:11:48 -07002582
2583static void sysfs_add_func(struct work_struct *w)
2584{
2585 struct kmem_cache *s;
2586
2587 down_write(&slub_lock);
2588 list_for_each_entry(s, &slab_caches, list) {
2589 if (s->flags & __SYSFS_ADD_DEFERRED) {
2590 s->flags &= ~__SYSFS_ADD_DEFERRED;
2591 sysfs_slab_add(s);
2592 }
2593 }
2594 up_write(&slub_lock);
2595}
2596
2597static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2598
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002599static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2600{
2601 struct kmem_cache *s;
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002602 char *text;
2603 size_t realsize;
Nick Piggin964cf352009-06-15 13:35:10 +03002604 unsigned long slabflags;
Christoph Lameter756dee72009-12-18 16:26:21 -06002605 int i;
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002606
2607 s = kmalloc_caches_dma[index];
2608 if (s)
2609 return s;
2610
2611 /* Dynamically create dma cache */
Christoph Lameter1ceef402007-08-07 15:11:48 -07002612 if (flags & __GFP_WAIT)
2613 down_write(&slub_lock);
2614 else {
2615 if (!down_write_trylock(&slub_lock))
2616 goto out;
2617 }
2618
2619 if (kmalloc_caches_dma[index])
2620 goto unlock_out;
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002621
Christoph Lameter7b55f622007-07-17 04:03:27 -07002622 realsize = kmalloc_caches[index].objsize;
Ingo Molnar3adbefe2008-02-05 17:57:39 -08002623 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2624 (unsigned int)realsize);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002625
Christoph Lameter756dee72009-12-18 16:26:21 -06002626 s = NULL;
2627 for (i = 0; i < KMALLOC_CACHES; i++)
2628 if (!kmalloc_caches[i].size)
2629 break;
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06002630
Christoph Lameter756dee72009-12-18 16:26:21 -06002631 BUG_ON(i >= KMALLOC_CACHES);
2632 s = kmalloc_caches + i;
Christoph Lameter1ceef402007-08-07 15:11:48 -07002633
Nick Piggin964cf352009-06-15 13:35:10 +03002634 /*
2635 * Must defer sysfs creation to a workqueue because we don't know
2636 * what context we are called from. Before sysfs comes up, we don't
2637 * need to do anything because our sysfs initcall will start by
2638 * adding all existing slabs to sysfs.
2639 */
Pekka Enberg5caf5c72009-06-17 08:30:54 +03002640 slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
Nick Piggin964cf352009-06-15 13:35:10 +03002641 if (slab_state >= SYSFS)
2642 slabflags |= __SYSFS_ADD_DEFERRED;
2643
David Rientjes7738dd92010-01-15 12:49:56 -08002644 if (!text || !kmem_cache_open(s, flags, text,
Nick Piggin964cf352009-06-15 13:35:10 +03002645 realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
Christoph Lameter756dee72009-12-18 16:26:21 -06002646 s->size = 0;
Christoph Lameter1ceef402007-08-07 15:11:48 -07002647 kfree(text);
2648 goto unlock_out;
Christoph Lameterdfce8642007-07-17 04:03:25 -07002649 }
Christoph Lameter1ceef402007-08-07 15:11:48 -07002650
2651 list_add(&s->list, &slab_caches);
2652 kmalloc_caches_dma[index] = s;
2653
Nick Piggin964cf352009-06-15 13:35:10 +03002654 if (slab_state >= SYSFS)
2655 schedule_work(&sysfs_add_work);
Christoph Lameter1ceef402007-08-07 15:11:48 -07002656
2657unlock_out:
Christoph Lameterdfce8642007-07-17 04:03:25 -07002658 up_write(&slub_lock);
Christoph Lameter1ceef402007-08-07 15:11:48 -07002659out:
Christoph Lameterdfce8642007-07-17 04:03:25 -07002660 return kmalloc_caches_dma[index];
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002661}
2662#endif
2663
Christoph Lameterf1b26332007-07-17 04:03:26 -07002664/*
2665 * Conversion table from small slab sizes / 8 to the index in the
2666 * kmalloc array. This is necessary for slabs < 192 since we have
2667 * non-power-of-two cache sizes there. The size of larger slabs can be
2668 * determined using fls.
2669 */
2670static s8 size_index[24] = {
2671 3, /* 8 */
2672 4, /* 16 */
2673 5, /* 24 */
2674 5, /* 32 */
2675 6, /* 40 */
2676 6, /* 48 */
2677 6, /* 56 */
2678 6, /* 64 */
2679 1, /* 72 */
2680 1, /* 80 */
2681 1, /* 88 */
2682 1, /* 96 */
2683 7, /* 104 */
2684 7, /* 112 */
2685 7, /* 120 */
2686 7, /* 128 */
2687 2, /* 136 */
2688 2, /* 144 */
2689 2, /* 152 */
2690 2, /* 160 */
2691 2, /* 168 */
2692 2, /* 176 */
2693 2, /* 184 */
2694 2 /* 192 */
2695};
2696
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03002697static inline int size_index_elem(size_t bytes)
2698{
2699 return (bytes - 1) / 8;
2700}
2701
Christoph Lameter81819f02007-05-06 14:49:36 -07002702static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2703{
Christoph Lameterf1b26332007-07-17 04:03:26 -07002704 int index;
Christoph Lameter81819f02007-05-06 14:49:36 -07002705
Christoph Lameterf1b26332007-07-17 04:03:26 -07002706 if (size <= 192) {
2707 if (!size)
2708 return ZERO_SIZE_PTR;
Christoph Lameter81819f02007-05-06 14:49:36 -07002709
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03002710 index = size_index[size_index_elem(size)];
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002711 } else
Christoph Lameterf1b26332007-07-17 04:03:26 -07002712 index = fls(size - 1);
Christoph Lameter81819f02007-05-06 14:49:36 -07002713
2714#ifdef CONFIG_ZONE_DMA
Christoph Lameterf1b26332007-07-17 04:03:26 -07002715 if (unlikely((flags & SLUB_DMA)))
Christoph Lameter2e443fd2007-07-17 04:03:24 -07002716 return dma_kmalloc_cache(index, flags);
Christoph Lameterf1b26332007-07-17 04:03:26 -07002717
Christoph Lameter81819f02007-05-06 14:49:36 -07002718#endif
2719 return &kmalloc_caches[index];
2720}
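
/*
 * Illustrative sketch, not part of slub.c: a self-contained userspace
 * model of the size-to-cache mapping that get_slab() performs above.
 * Requests up to 192 bytes go through the size_index[] table in 8-byte
 * steps; larger requests use fls(size - 1), i.e. the next power of
 * two. The table values mirror the ones above; fls_model() stands in
 * for the kernel's fls().
 */
#include <stdio.h>

static const signed char size_index_model[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2,
};

static int fls_model(unsigned long x)	/* highest set bit, 1-based */
{
	int bit = 0;

	while (x) {
		x >>= 1;
		bit++;
	}
	return bit;
}

static int kmalloc_index_model(size_t size)
{
	if (size <= 192)
		return size ? size_index_model[(size - 1) / 8] : -1;
	return fls_model(size - 1);
}

int main(void)
{
	size_t sizes[] = { 8, 30, 96, 192, 200, 4000 };
	int i;

	for (i = 0; i < 6; i++)
		printf("kmalloc(%4zu) -> kmalloc_caches[%d]\n",
		       sizes[i], kmalloc_index_model(sizes[i]));
	return 0;
}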
2721
2722void *__kmalloc(size_t size, gfp_t flags)
2723{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002724 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002725 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002726
Christoph Lameterffadd4d2009-02-17 12:05:07 -05002727 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02002728 return kmalloc_large(size, flags);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002729
2730 s = get_slab(size, flags);
2731
2732 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07002733 return s;
2734
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002735 ret = slab_alloc(s, flags, -1, _RET_IP_);
2736
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02002737 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002738
2739 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002740}
2741EXPORT_SYMBOL(__kmalloc);
2742
Christoph Lameterf619cfe2008-03-01 13:56:40 -08002743static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2744{
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002745 struct page *page;
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01002746 void *ptr = NULL;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08002747
Vegard Nossumb1eeab62008-11-25 16:55:53 +01002748 flags |= __GFP_COMP | __GFP_NOTRACK;
2749 page = alloc_pages_node(node, flags, get_order(size));
Christoph Lameterf619cfe2008-03-01 13:56:40 -08002750 if (page)
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01002751 ptr = page_address(page);
2752
2753 kmemleak_alloc(ptr, size, 1, flags);
2754 return ptr;
Christoph Lameterf619cfe2008-03-01 13:56:40 -08002755}
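
/*
 * Illustrative sketch, not part of slub.c: userspace model of the
 * page-order computation kmalloc_large_node() relies on. get_order()
 * returns the smallest order with (PAGE_SIZE << order) >= size; a
 * 4096-byte page is assumed here.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL

static int get_order_model(unsigned long size)
{
	int order = 0;

	while ((MODEL_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("8192 bytes -> order %d\n", get_order_model(8192));	/* 1 */
	printf("9000 bytes -> order %d\n", get_order_model(9000));	/* 2 */
	return 0;
}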
2756
Christoph Lameter81819f02007-05-06 14:49:36 -07002757#ifdef CONFIG_NUMA
2758void *__kmalloc_node(size_t size, gfp_t flags, int node)
2759{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002760 struct kmem_cache *s;
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002761 void *ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002762
Ingo Molnar057685c2009-02-20 12:15:30 +01002763 if (unlikely(size > SLUB_MAX_SIZE)) {
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002764 ret = kmalloc_large_node(size, flags, node);
2765
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02002766 trace_kmalloc_node(_RET_IP_, ret,
2767 size, PAGE_SIZE << get_order(size),
2768 flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002769
2770 return ret;
2771 }
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002772
2773 s = get_slab(size, flags);
2774
2775 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07002776 return s;
2777
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002778 ret = slab_alloc(s, flags, node, _RET_IP_);
2779
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02002780 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
Eduard - Gabriel Munteanu5b882be2008-08-19 20:43:26 +03002781
2782 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07002783}
2784EXPORT_SYMBOL(__kmalloc_node);
2785#endif
2786
2787size_t ksize(const void *object)
2788{
Christoph Lameter272c1d22007-06-08 13:46:49 -07002789 struct page *page;
Christoph Lameter81819f02007-05-06 14:49:36 -07002790 struct kmem_cache *s;
2791
Christoph Lameteref8b4522007-10-16 01:24:46 -07002792 if (unlikely(object == ZERO_SIZE_PTR))
Christoph Lameter272c1d22007-06-08 13:46:49 -07002793 return 0;
2794
Vegard Nossum294a80a2007-12-04 23:45:30 -08002795 page = virt_to_head_page(object);
Vegard Nossum294a80a2007-12-04 23:45:30 -08002796
Pekka Enberg76994412008-05-22 19:22:25 +03002797 if (unlikely(!PageSlab(page))) {
2798 WARN_ON(!PageCompound(page));
Vegard Nossum294a80a2007-12-04 23:45:30 -08002799 return PAGE_SIZE << compound_order(page);
Pekka Enberg76994412008-05-22 19:22:25 +03002800 }
Christoph Lameter81819f02007-05-06 14:49:36 -07002801 s = page->slab;
Christoph Lameter81819f02007-05-06 14:49:36 -07002802
Christoph Lameterae20bfd2008-02-15 23:45:25 -08002803#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter81819f02007-05-06 14:49:36 -07002804 /*
2805 * Debugging requires use of the padding between the object
2806 * and whatever may come after it.
2807 */
2808 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2809 return s->objsize;
2810
Christoph Lameterae20bfd2008-02-15 23:45:25 -08002811#endif
Christoph Lameter81819f02007-05-06 14:49:36 -07002812 /*
2813 * If we need to store the freelist pointer back there
2814 * or track user information, then we can
2815 * only use the space before that information.
2816 */
2817 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2818 return s->inuse;
Christoph Lameter81819f02007-05-06 14:49:36 -07002819 /*
2820 * Else we can use all the padding etc. for the allocation.
2821 */
2822 return s->size;
2823}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02002824EXPORT_SYMBOL(ksize);
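
/*
 * Illustrative sketch, not part of slub.c: ksize() reports the bytes
 * actually usable behind an allocation, which may exceed the request
 * because of the cache rounding described above. my_show_slack() is a
 * made-up example; with debugging off, the 100-byte request is likely
 * served from the 128-byte kmalloc cache.
 */
static void my_show_slack(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);

	if (!buf)
		return;
	printk(KERN_INFO "asked for 100, got %zu usable bytes\n",
		ksize(buf));
	kfree(buf);
}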
Christoph Lameter81819f02007-05-06 14:49:36 -07002825
2826void kfree(const void *x)
2827{
Christoph Lameter81819f02007-05-06 14:49:36 -07002828 struct page *page;
Christoph Lameter5bb983b2008-02-07 17:47:41 -08002829 void *object = (void *)x;
Christoph Lameter81819f02007-05-06 14:49:36 -07002830
Pekka Enberg2121db72009-03-25 11:05:57 +02002831 trace_kfree(_RET_IP_, x);
2832
Satyam Sharma2408c552007-10-16 01:24:44 -07002833 if (unlikely(ZERO_OR_NULL_PTR(x)))
Christoph Lameter81819f02007-05-06 14:49:36 -07002834 return;
2835
Christoph Lameterb49af682007-05-06 14:49:41 -07002836 page = virt_to_head_page(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002837 if (unlikely(!PageSlab(page))) {
Christoph Lameter09375022008-05-28 10:32:22 -07002838 BUG_ON(!PageCompound(page));
Catalin Marinase4f7c0b42009-07-07 10:32:59 +01002839 kmemleak_free(x);
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07002840 put_page(page);
2841 return;
2842 }
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03002843 slab_free(page->slab, page, object, _RET_IP_);
Christoph Lameter81819f02007-05-06 14:49:36 -07002844}
2845EXPORT_SYMBOL(kfree);
2846
Christoph Lameter2086d262007-05-06 14:49:46 -07002847/*
Christoph Lameter672bba32007-05-09 02:32:39 -07002848 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2849 * the remaining slabs by the number of items in use. The slabs with the
2850 * most items in use come first. New allocations will then fill those up
2851 * and thus they can be removed from the partial lists.
2852 *
2853 * The slabs with the fewest items in use are placed last. This results in
2854 * them being allocated from last, increasing the chance that their
2855 * remaining objects are freed so the slabs can eventually be discarded.
Christoph Lameter2086d262007-05-06 14:49:46 -07002856 */
2857int kmem_cache_shrink(struct kmem_cache *s)
2858{
2859 int node;
2860 int i;
2861 struct kmem_cache_node *n;
2862 struct page *page;
2863 struct page *t;
Christoph Lameter205ab992008-04-14 19:11:40 +03002864 int objects = oo_objects(s->max);
Christoph Lameter2086d262007-05-06 14:49:46 -07002865 struct list_head *slabs_by_inuse =
Christoph Lameter834f3d12008-04-14 19:11:31 +03002866 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
Christoph Lameter2086d262007-05-06 14:49:46 -07002867 unsigned long flags;
2868
2869 if (!slabs_by_inuse)
2870 return -ENOMEM;
2871
2872 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07002873 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter2086d262007-05-06 14:49:46 -07002874 n = get_node(s, node);
2875
2876 if (!n->nr_partial)
2877 continue;
2878
Christoph Lameter834f3d12008-04-14 19:11:31 +03002879 for (i = 0; i < objects; i++)
Christoph Lameter2086d262007-05-06 14:49:46 -07002880 INIT_LIST_HEAD(slabs_by_inuse + i);
2881
2882 spin_lock_irqsave(&n->list_lock, flags);
2883
2884 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002885 * Build lists indexed by the items in use in each slab.
Christoph Lameter2086d262007-05-06 14:49:46 -07002886 *
Christoph Lameter672bba32007-05-09 02:32:39 -07002887 * Note that concurrent frees may occur while we hold the
2888 * list_lock. page->inuse here is the upper limit.
Christoph Lameter2086d262007-05-06 14:49:46 -07002889 */
2890 list_for_each_entry_safe(page, t, &n->partial, lru) {
2891 if (!page->inuse && slab_trylock(page)) {
2892 /*
2893 * Must hold slab lock here because slab_free
2894 * may have freed the last object and be
2895 * waiting to release the slab.
2896 */
2897 list_del(&page->lru);
2898 n->nr_partial--;
2899 slab_unlock(page);
2900 discard_slab(s, page);
2901 } else {
Christoph Lameterfcda3d82007-07-30 13:06:46 -07002902 list_move(&page->lru,
2903 slabs_by_inuse + page->inuse);
Christoph Lameter2086d262007-05-06 14:49:46 -07002904 }
2905 }
2906
Christoph Lameter2086d262007-05-06 14:49:46 -07002907 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07002908 * Rebuild the partial list with the slabs filled up most
2909 * first and the least used slabs at the end.
Christoph Lameter2086d262007-05-06 14:49:46 -07002910 */
Christoph Lameter834f3d12008-04-14 19:11:31 +03002911 for (i = objects - 1; i >= 0; i--)
Christoph Lameter2086d262007-05-06 14:49:46 -07002912 list_splice(slabs_by_inuse + i, n->partial.prev);
2913
Christoph Lameter2086d262007-05-06 14:49:46 -07002914 spin_unlock_irqrestore(&n->list_lock, flags);
2915 }
2916
2917 kfree(slabs_by_inuse);
2918 return 0;
2919}
2920EXPORT_SYMBOL(kmem_cache_shrink);
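
/*
 * Illustrative sketch, not part of slub.c: a userspace model of the
 * bucketing kmem_cache_shrink() does above. Partial slabs are grouped
 * by in-use count and the list is rebuilt fullest-first, so the
 * emptiest slabs sit at the tail where they are most likely to drain;
 * fully empty ones are discarded outright.
 */
#include <stdio.h>

#define SLAB_OBJECTS 4	/* objects per slab in this toy model */

int main(void)
{
	int inuse[] = { 1, 3, 0, 2, 3, 1 };	/* toy partial list */
	int n = 6, i, bucket;

	for (bucket = SLAB_OBJECTS - 1; bucket >= 1; bucket--)
		for (i = 0; i < n; i++)
			if (inuse[i] == bucket)
				printf("keep slab %d (inuse=%d)\n", i, bucket);
	for (i = 0; i < n; i++)
		if (!inuse[i])
			printf("discard slab %d (empty)\n", i);
	return 0;
}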
2921
Yasunori Gotob9049e22007-10-21 16:41:37 -07002922#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2923static int slab_mem_going_offline_callback(void *arg)
2924{
2925 struct kmem_cache *s;
2926
2927 down_read(&slub_lock);
2928 list_for_each_entry(s, &slab_caches, list)
2929 kmem_cache_shrink(s);
2930 up_read(&slub_lock);
2931
2932 return 0;
2933}
2934
2935static void slab_mem_offline_callback(void *arg)
2936{
2937 struct kmem_cache_node *n;
2938 struct kmem_cache *s;
2939 struct memory_notify *marg = arg;
2940 int offline_node;
2941
2942 offline_node = marg->status_change_nid;
2943
2944 /*
2945 * If the node still has available memory, we still need its
2946 * kmem_cache_node, so there is nothing to do here.
2947 */
2948 if (offline_node < 0)
2949 return;
2950
2951 down_read(&slub_lock);
2952 list_for_each_entry(s, &slab_caches, list) {
2953 n = get_node(s, offline_node);
2954 if (n) {
2955 /*
2956 * If n->nr_slabs > 0, slabs still exist on the node
2957 * that is going down. We were unable to free them,
2958 * and the offline_pages() function shouldn't call this
2959 * callback. So, we must fail.
2960 */
Christoph Lameter0f389ec2008-04-14 18:53:02 +03002961 BUG_ON(slabs_node(s, offline_node));
Yasunori Gotob9049e22007-10-21 16:41:37 -07002962
2963 s->node[offline_node] = NULL;
2964 kmem_cache_free(kmalloc_caches, n);
2965 }
2966 }
2967 up_read(&slub_lock);
2968}
2969
2970static int slab_mem_going_online_callback(void *arg)
2971{
2972 struct kmem_cache_node *n;
2973 struct kmem_cache *s;
2974 struct memory_notify *marg = arg;
2975 int nid = marg->status_change_nid;
2976 int ret = 0;
2977
2978 /*
2979 * If the node's memory is already available, then kmem_cache_node is
2980 * already created. Nothing to do.
2981 */
2982 if (nid < 0)
2983 return 0;
2984
2985 /*
Christoph Lameter0121c6192008-04-29 16:11:12 -07002986 * We are bringing a node online. No memory is available yet. We must
Yasunori Gotob9049e22007-10-21 16:41:37 -07002987 * allocate a kmem_cache_node structure in order to bring the node
2988 * online.
2989 */
2990 down_read(&slub_lock);
2991 list_for_each_entry(s, &slab_caches, list) {
2992 /*
2993 * XXX: kmem_cache_alloc_node will fallback to other nodes
2994 * since memory is not yet available from the node that
2995 * is brought up.
2996 */
2997 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2998 if (!n) {
2999 ret = -ENOMEM;
3000 goto out;
3001 }
Pekka Enberg5595cff2008-08-05 09:28:47 +03003002 init_kmem_cache_node(n, s);
Yasunori Gotob9049e22007-10-21 16:41:37 -07003003 s->node[nid] = n;
3004 }
3005out:
3006 up_read(&slub_lock);
3007 return ret;
3008}
3009
3010static int slab_memory_callback(struct notifier_block *self,
3011 unsigned long action, void *arg)
3012{
3013 int ret = 0;
3014
3015 switch (action) {
3016 case MEM_GOING_ONLINE:
3017 ret = slab_mem_going_online_callback(arg);
3018 break;
3019 case MEM_GOING_OFFLINE:
3020 ret = slab_mem_going_offline_callback(arg);
3021 break;
3022 case MEM_OFFLINE:
3023 case MEM_CANCEL_ONLINE:
3024 slab_mem_offline_callback(arg);
3025 break;
3026 case MEM_ONLINE:
3027 case MEM_CANCEL_OFFLINE:
3028 break;
3029 }
KAMEZAWA Hiroyukidc19f9d2008-12-01 13:13:48 -08003030 if (ret)
3031 ret = notifier_from_errno(ret);
3032 else
3033 ret = NOTIFY_OK;
Yasunori Gotob9049e22007-10-21 16:41:37 -07003034 return ret;
3035}
3036
3037#endif /* CONFIG_MEMORY_HOTPLUG */
3038
Christoph Lameter81819f02007-05-06 14:49:36 -07003039/********************************************************************
3040 * Basic setup of slabs
3041 *******************************************************************/
3042
3043void __init kmem_cache_init(void)
3044{
3045 int i;
Christoph Lameter4b356be2007-06-16 10:16:13 -07003046 int caches = 0;
Christoph Lameter81819f02007-05-06 14:49:36 -07003047
3048#ifdef CONFIG_NUMA
3049 /*
3050 * Must first have the slab cache available for the allocations of the
Christoph Lameter672bba32007-05-09 02:32:39 -07003051 * struct kmem_cache_node structures. There is special bootstrap code in
Christoph Lameter81819f02007-05-06 14:49:36 -07003052 * kmem_cache_open for slab_state == DOWN.
3053 */
3054 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
Pekka Enberg83b519e2009-06-10 19:40:04 +03003055 sizeof(struct kmem_cache_node), GFP_NOWAIT);
Christoph Lameter8ffa6872007-05-31 00:40:51 -07003056 kmalloc_caches[0].refcount = -1;
Christoph Lameter4b356be2007-06-16 10:16:13 -07003057 caches++;
Yasunori Gotob9049e22007-10-21 16:41:37 -07003058
Nadia Derbey0c40ba42008-04-29 01:00:41 -07003059 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
Christoph Lameter81819f02007-05-06 14:49:36 -07003060#endif
3061
3062 /* Able to allocate the per node structures */
3063 slab_state = PARTIAL;
3064
3065 /* Caches that are not of the two-to-the-power-of size */
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003066 if (KMALLOC_MIN_SIZE <= 32) {
Christoph Lameter4b356be2007-06-16 10:16:13 -07003067 create_kmalloc_cache(&kmalloc_caches[1],
Pekka Enberg83b519e2009-06-10 19:40:04 +03003068 "kmalloc-96", 96, GFP_NOWAIT);
Christoph Lameter4b356be2007-06-16 10:16:13 -07003069 caches++;
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003070 }
3071 if (KMALLOC_MIN_SIZE <= 64) {
Christoph Lameter4b356be2007-06-16 10:16:13 -07003072 create_kmalloc_cache(&kmalloc_caches[2],
Pekka Enberg83b519e2009-06-10 19:40:04 +03003073 "kmalloc-192", 192, GFP_NOWAIT);
Christoph Lameter4b356be2007-06-16 10:16:13 -07003074 caches++;
3075 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003076
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003077 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003078 create_kmalloc_cache(&kmalloc_caches[i],
Pekka Enberg83b519e2009-06-10 19:40:04 +03003079 "kmalloc", 1 << i, GFP_NOWAIT);
Christoph Lameter4b356be2007-06-16 10:16:13 -07003080 caches++;
3081 }
Christoph Lameter81819f02007-05-06 14:49:36 -07003082
Christoph Lameterf1b26332007-07-17 04:03:26 -07003083
3084 /*
3085 * Patch up the size_index table if we have strange large alignment
3086 * requirements for the kmalloc array. This seems to be the case only
Christoph Lameter6446faa2008-02-15 23:45:26 -08003087 * on MIPS. The standard arches will not generate any code here.
Christoph Lameterf1b26332007-07-17 04:03:26 -07003088 *
3089 * Largest permitted alignment is 256 bytes due to the way we
3090 * handle the index determination for the smaller caches.
3091 *
3092 * Make sure that nothing crazy happens if someone starts tinkering
3093 * around with ARCH_KMALLOC_MINALIGN.
3094 */
3095 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3096 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3097
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003098 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3099 int elem = size_index_elem(i);
3100 if (elem >= ARRAY_SIZE(size_index))
3101 break;
3102 size_index[elem] = KMALLOC_SHIFT_LOW;
3103 }
Christoph Lameterf1b26332007-07-17 04:03:26 -07003104
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003105 if (KMALLOC_MIN_SIZE == 64) {
3106 /*
3107 * The 96 byte cache is not used if the alignment
3108 * is 64 bytes.
3109 */
3110 for (i = 64 + 8; i <= 96; i += 8)
3111 size_index[size_index_elem(i)] = 7;
3112 } else if (KMALLOC_MIN_SIZE == 128) {
Christoph Lameter41d54d32008-07-03 09:14:26 -05003113 /*
3114 * The 192 byte sized cache is not used if the alignment
3115 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3116 * instead.
3117 */
3118 for (i = 128 + 8; i <= 192; i += 8)
Aaro Koskinenacdfcd02009-08-28 14:28:54 +03003119 size_index[size_index_elem(i)] = 8;
Christoph Lameter41d54d32008-07-03 09:14:26 -05003120 }
3121
Christoph Lameter81819f02007-05-06 14:49:36 -07003122 slab_state = UP;
3123
3124 /* Provide the correct kmalloc names now that the caches are up */
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003125 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
Christoph Lameter81819f02007-05-06 14:49:36 -07003126 kmalloc_caches[i].name =
Pekka Enberg83b519e2009-06-10 19:40:04 +03003127 kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
Christoph Lameter81819f02007-05-06 14:49:36 -07003128
3129#ifdef CONFIG_SMP
3130 register_cpu_notifier(&slab_notifier);
Christoph Lameter9dfc6e62009-12-18 16:26:20 -06003131#endif
3132#ifdef CONFIG_NUMA
3133 kmem_size = offsetof(struct kmem_cache, node) +
3134 nr_node_ids * sizeof(struct kmem_cache_node *);
Christoph Lameter4c93c3552007-10-16 01:26:08 -07003135#else
3136 kmem_size = sizeof(struct kmem_cache);
Christoph Lameter81819f02007-05-06 14:49:36 -07003137#endif
3138
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003139 printk(KERN_INFO
3140 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
Christoph Lameter4b356be2007-06-16 10:16:13 -07003141 " CPUs=%d, Nodes=%d\n",
3142 caches, cache_line_size(),
Christoph Lameter81819f02007-05-06 14:49:36 -07003143 slub_min_order, slub_max_order, slub_min_objects,
3144 nr_cpu_ids, nr_node_ids);
3145}
3146
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003147void __init kmem_cache_init_late(void)
3148{
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003149}
3150
Christoph Lameter81819f02007-05-06 14:49:36 -07003151/*
3152 * Find a mergeable slab cache
3153 */
3154static int slab_unmergeable(struct kmem_cache *s)
3155{
3156 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3157 return 1;
3158
Christoph Lameterc59def9f2007-05-16 22:10:50 -07003159 if (s->ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003160 return 1;
3161
Christoph Lameter8ffa6872007-05-31 00:40:51 -07003162 /*
3163 * We may have set a slab to be unmergeable during bootstrap.
3164 */
3165 if (s->refcount < 0)
3166 return 1;
3167
Christoph Lameter81819f02007-05-06 14:49:36 -07003168 return 0;
3169}
3170
3171static struct kmem_cache *find_mergeable(size_t size,
Christoph Lameterba0268a2007-09-11 15:24:11 -07003172 size_t align, unsigned long flags, const char *name,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003173 void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003174{
Christoph Lameter5b95a4a2007-07-17 04:03:19 -07003175 struct kmem_cache *s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003176
3177 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3178 return NULL;
3179
Christoph Lameterc59def9f2007-05-16 22:10:50 -07003180 if (ctor)
Christoph Lameter81819f02007-05-06 14:49:36 -07003181 return NULL;
3182
3183 size = ALIGN(size, sizeof(void *));
3184 align = calculate_alignment(flags, align, size);
3185 size = ALIGN(size, align);
Christoph Lameterba0268a2007-09-11 15:24:11 -07003186 flags = kmem_cache_flags(size, flags, name, NULL);
Christoph Lameter81819f02007-05-06 14:49:36 -07003187
Christoph Lameter5b95a4a2007-07-17 04:03:19 -07003188 list_for_each_entry(s, &slab_caches, list) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003189 if (slab_unmergeable(s))
3190 continue;
3191
3192 if (size > s->size)
3193 continue;
3194
Christoph Lameterba0268a2007-09-11 15:24:11 -07003195 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
Christoph Lameter81819f02007-05-06 14:49:36 -07003196 continue;
3197 /*
3198 * Check if alignment is compatible.
3199 * Courtesy of Adrian Drzewiecki
3200 */
Pekka Enberg06428782008-01-07 23:20:27 -08003201 if ((s->size & ~(align - 1)) != s->size)
Christoph Lameter81819f02007-05-06 14:49:36 -07003202 continue;
3203
3204 if (s->size - size >= sizeof(void *))
3205 continue;
3206
3207 return s;
3208 }
3209 return NULL;
3210}
3211
3212struct kmem_cache *kmem_cache_create(const char *name, size_t size,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003213 size_t align, unsigned long flags, void (*ctor)(void *))
Christoph Lameter81819f02007-05-06 14:49:36 -07003214{
3215 struct kmem_cache *s;
3216
Benjamin Herrenschmidtfe1ff492009-09-21 17:02:30 -07003217 if (WARN_ON(!name))
3218 return NULL;
3219
Christoph Lameter81819f02007-05-06 14:49:36 -07003220 down_write(&slub_lock);
Christoph Lameterba0268a2007-09-11 15:24:11 -07003221 s = find_mergeable(size, align, flags, name, ctor);
Christoph Lameter81819f02007-05-06 14:49:36 -07003222 if (s) {
3223 s->refcount++;
3224 /*
3225 * Adjust the object sizes so that we clear
3226 * the complete object on kzalloc.
3227 */
3228 s->objsize = max(s->objsize, (int)size);
3229 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003230 up_write(&slub_lock);
Christoph Lameter6446faa2008-02-15 23:45:26 -08003231
David Rientjes7b8f3b62008-12-17 22:09:46 -08003232 if (sysfs_slab_alias(s, name)) {
3233 down_write(&slub_lock);
3234 s->refcount--;
3235 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07003236 goto err;
David Rientjes7b8f3b62008-12-17 22:09:46 -08003237 }
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003238 return s;
3239 }
Christoph Lameter6446faa2008-02-15 23:45:26 -08003240
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003241 s = kmalloc(kmem_size, GFP_KERNEL);
3242 if (s) {
3243 if (kmem_cache_open(s, GFP_KERNEL, name,
Christoph Lameterc59def9f2007-05-16 22:10:50 -07003244 size, align, flags, ctor)) {
Christoph Lameter81819f02007-05-06 14:49:36 -07003245 list_add(&s->list, &slab_caches);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003246 up_write(&slub_lock);
David Rientjes7b8f3b62008-12-17 22:09:46 -08003247 if (sysfs_slab_add(s)) {
3248 down_write(&slub_lock);
3249 list_del(&s->list);
3250 up_write(&slub_lock);
3251 kfree(s);
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003252 goto err;
David Rientjes7b8f3b62008-12-17 22:09:46 -08003253 }
Christoph Lametera0e1d1b2007-07-17 04:03:31 -07003254 return s;
3255 }
3256 kfree(s);
Christoph Lameter81819f02007-05-06 14:49:36 -07003257 }
3258 up_write(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07003259
3260err:
Christoph Lameter81819f02007-05-06 14:49:36 -07003261 if (flags & SLAB_PANIC)
3262 panic("Cannot create slabcache %s\n", name);
3263 else
3264 s = NULL;
3265 return s;
3266}
3267EXPORT_SYMBOL(kmem_cache_create);
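
/*
 * Illustrative sketch, not part of slub.c: typical kmem_cache_create()
 * usage from a hypothetical driver; struct my_record and the other
 * my_* names are made up. Note that supplying a constructor makes the
 * cache unmergeable per slab_unmergeable() above, so this cache will
 * not be aliased to an existing one.
 */
struct my_record {
	unsigned long id;
	void *payload;
};

static void my_record_ctor(void *obj)
{
	struct my_record *r = obj;

	r->id = 0;
	r->payload = NULL;
}

static struct kmem_cache *my_record_cache;

static int __init my_record_cache_init(void)
{
	my_record_cache = kmem_cache_create("my_record",
			sizeof(struct my_record), 0, SLAB_HWCACHE_ALIGN,
			my_record_ctor);
	return my_record_cache ? 0 : -ENOMEM;
}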
3268
Christoph Lameter81819f02007-05-06 14:49:36 -07003269#ifdef CONFIG_SMP
Christoph Lameter27390bc2007-06-01 00:47:09 -07003270/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003271 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3272 * necessary.
Christoph Lameter81819f02007-05-06 14:49:36 -07003273 */
3274static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3275 unsigned long action, void *hcpu)
3276{
3277 long cpu = (long)hcpu;
Christoph Lameter5b95a4a2007-07-17 04:03:19 -07003278 struct kmem_cache *s;
3279 unsigned long flags;
Christoph Lameter81819f02007-05-06 14:49:36 -07003280
3281 switch (action) {
3282 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003283 case CPU_UP_CANCELED_FROZEN:
Christoph Lameter81819f02007-05-06 14:49:36 -07003284 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003285 case CPU_DEAD_FROZEN:
Christoph Lameter5b95a4a2007-07-17 04:03:19 -07003286 down_read(&slub_lock);
3287 list_for_each_entry(s, &slab_caches, list) {
3288 local_irq_save(flags);
3289 __flush_cpu_slab(s, cpu);
3290 local_irq_restore(flags);
3291 }
3292 up_read(&slub_lock);
Christoph Lameter81819f02007-05-06 14:49:36 -07003293 break;
3294 default:
3295 break;
3296 }
3297 return NOTIFY_OK;
3298}
3299
Pekka Enberg06428782008-01-07 23:20:27 -08003300static struct notifier_block __cpuinitdata slab_notifier = {
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003301 .notifier_call = slab_cpuup_callback
Pekka Enberg06428782008-01-07 23:20:27 -08003302};
Christoph Lameter81819f02007-05-06 14:49:36 -07003303
3304#endif
3305
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003306void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07003307{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003308 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003309 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003310
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003311 if (unlikely(size > SLUB_MAX_SIZE))
Pekka Enbergeada35e2008-02-11 22:47:46 +02003312 return kmalloc_large(size, gfpflags);
3313
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003314 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07003315
Satyam Sharma2408c552007-10-16 01:24:44 -07003316 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003317 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003318
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003319 ret = slab_alloc(s, gfpflags, -1, caller);
3320
3321 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003322 trace_kmalloc(caller, ret, size, s->size, gfpflags);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003323
3324 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003325}
3326
3327void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003328 int node, unsigned long caller)
Christoph Lameter81819f02007-05-06 14:49:36 -07003329{
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003330 struct kmem_cache *s;
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003331 void *ret;
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003332
Christoph Lameterffadd4d2009-02-17 12:05:07 -05003333 if (unlikely(size > SLUB_MAX_SIZE))
Christoph Lameterf619cfe2008-03-01 13:56:40 -08003334 return kmalloc_large_node(size, gfpflags, node);
Pekka Enbergeada35e2008-02-11 22:47:46 +02003335
Christoph Lameteraadb4bc2007-10-16 01:24:38 -07003336 s = get_slab(size, gfpflags);
Christoph Lameter81819f02007-05-06 14:49:36 -07003337
Satyam Sharma2408c552007-10-16 01:24:44 -07003338 if (unlikely(ZERO_OR_NULL_PTR(s)))
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003339 return s;
Christoph Lameter81819f02007-05-06 14:49:36 -07003340
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003341 ret = slab_alloc(s, gfpflags, node, caller);
3342
3343 /* Honor the call site pointer we received. */
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003344 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
Eduard - Gabriel Munteanu94b528d2008-08-24 20:49:35 +03003345
3346 return ret;
Christoph Lameter81819f02007-05-06 14:49:36 -07003347}
3348
Christoph Lameterf6acb632008-04-29 16:16:06 -07003349#ifdef CONFIG_SLUB_DEBUG
Christoph Lameter205ab992008-04-14 19:11:40 +03003350static int count_inuse(struct page *page)
3351{
3352 return page->inuse;
3353}
3354
3355static int count_total(struct page *page)
3356{
3357 return page->objects;
3358}
3359
Christoph Lameter434e2452007-07-17 04:03:30 -07003360static int validate_slab(struct kmem_cache *s, struct page *page,
3361 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003362{
3363 void *p;
Christoph Lametera973e9d2008-03-01 13:40:44 -08003364 void *addr = page_address(page);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003365
3366 if (!check_slab(s, page) ||
3367 !on_freelist(s, page, NULL))
3368 return 0;
3369
3370 /* Now we know that a valid freelist exists */
Christoph Lameter39b26462008-04-14 19:11:30 +03003371 bitmap_zero(map, page->objects);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003372
Christoph Lameter7656c722007-05-09 02:32:40 -07003373 for_each_free_object(p, s, page->freelist) {
3374 set_bit(slab_index(p, s, addr), map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003375 if (!check_object(s, page, p, 0))
3376 return 0;
3377 }
3378
Christoph Lameter224a88b2008-04-14 19:11:31 +03003379 for_each_object(p, s, addr, page->objects)
Christoph Lameter7656c722007-05-09 02:32:40 -07003380 if (!test_bit(slab_index(p, s, addr), map))
Christoph Lameter53e15af2007-05-06 14:49:43 -07003381 if (!check_object(s, page, p, 1))
3382 return 0;
3383 return 1;
3384}
3385
Christoph Lameter434e2452007-07-17 04:03:30 -07003386static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3387 unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003388{
3389 if (slab_trylock(page)) {
Christoph Lameter434e2452007-07-17 04:03:30 -07003390 validate_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003391 slab_unlock(page);
3392 } else
3393 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3394 s->name, page);
3395
3396 if (s->flags & DEBUG_DEFAULT_FLAGS) {
Andy Whitcroft8a380822008-07-23 21:27:18 -07003397 if (!PageSlubDebug(page))
3398 printk(KERN_ERR "SLUB %s: SlubDebug not set "
Christoph Lameter53e15af2007-05-06 14:49:43 -07003399 "on slab 0x%p\n", s->name, page);
3400 } else {
Andy Whitcroft8a380822008-07-23 21:27:18 -07003401 if (PageSlubDebug(page))
3402 printk(KERN_ERR "SLUB %s: SlubDebug set on "
Christoph Lameter53e15af2007-05-06 14:49:43 -07003403 "slab 0x%p\n", s->name, page);
3404 }
3405}
3406
Christoph Lameter434e2452007-07-17 04:03:30 -07003407static int validate_slab_node(struct kmem_cache *s,
3408 struct kmem_cache_node *n, unsigned long *map)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003409{
3410 unsigned long count = 0;
3411 struct page *page;
3412 unsigned long flags;
3413
3414 spin_lock_irqsave(&n->list_lock, flags);
3415
3416 list_for_each_entry(page, &n->partial, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07003417 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003418 count++;
3419 }
3420 if (count != n->nr_partial)
3421 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3422 "counter=%ld\n", s->name, count, n->nr_partial);
3423
3424 if (!(s->flags & SLAB_STORE_USER))
3425 goto out;
3426
3427 list_for_each_entry(page, &n->full, lru) {
Christoph Lameter434e2452007-07-17 04:03:30 -07003428 validate_slab_slab(s, page, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003429 count++;
3430 }
3431 if (count != atomic_long_read(&n->nr_slabs))
3432 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3433 "counter=%ld\n", s->name, count,
3434 atomic_long_read(&n->nr_slabs));
3435
3436out:
3437 spin_unlock_irqrestore(&n->list_lock, flags);
3438 return count;
3439}
3440
Christoph Lameter434e2452007-07-17 04:03:30 -07003441static long validate_slab_cache(struct kmem_cache *s)
Christoph Lameter53e15af2007-05-06 14:49:43 -07003442{
3443 int node;
3444 unsigned long count = 0;
Christoph Lameter205ab992008-04-14 19:11:40 +03003445 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
Christoph Lameter434e2452007-07-17 04:03:30 -07003446 sizeof(unsigned long), GFP_KERNEL);
3447
3448 if (!map)
3449 return -ENOMEM;
Christoph Lameter53e15af2007-05-06 14:49:43 -07003450
3451 flush_all(s);
Christoph Lameterf64dc582007-10-16 01:25:33 -07003452 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter53e15af2007-05-06 14:49:43 -07003453 struct kmem_cache_node *n = get_node(s, node);
3454
Christoph Lameter434e2452007-07-17 04:03:30 -07003455 count += validate_slab_node(s, n, map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003456 }
Christoph Lameter434e2452007-07-17 04:03:30 -07003457 kfree(map);
Christoph Lameter53e15af2007-05-06 14:49:43 -07003458 return count;
3459}
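
/*
 * Illustrative sketch, not part of slub.c: userspace model of the
 * free-bitmap walk used by validate_slab() above (and by
 * list_slab_objects() and process_slab()). Free objects are marked in
 * a bitmap first; any object index left unmarked must be allocated.
 */
#include <stdio.h>

#define NR_OBJECTS 8

int main(void)
{
	unsigned int free_mask = 0;
	int freelist[] = { 1, 4, 6 };	/* toy freelist: free object indices */
	int i;

	for (i = 0; i < 3; i++)
		free_mask |= 1u << freelist[i];

	for (i = 0; i < NR_OBJECTS; i++)
		if (!(free_mask & (1u << i)))
			printf("object %d is allocated\n", i);
	return 0;
}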
3460
Christoph Lameterb3459702007-05-09 02:32:41 -07003461#ifdef SLUB_RESILIENCY_TEST
3462static void resiliency_test(void)
3463{
3464 u8 *p;
3465
3466 printk(KERN_ERR "SLUB resiliency testing\n");
3467 printk(KERN_ERR "-----------------------\n");
3468 printk(KERN_ERR "A. Corruption after allocation\n");
3469
3470 p = kzalloc(16, GFP_KERNEL);
3471 p[16] = 0x12;
3472 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3473 " 0x12->0x%p\n\n", p + 16);
3474
3475 validate_slab_cache(kmalloc_caches + 4);
3476
3477 /* Hmmm... The next two are dangerous */
3478 p = kzalloc(32, GFP_KERNEL);
3479 p[32 + sizeof(void *)] = 0x34;
3480 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003481 " 0x34 -> 0x%p\n", p);
3482 printk(KERN_ERR
3483 "If allocated object is overwritten then not detectable\n\n");
Christoph Lameterb3459702007-05-09 02:32:41 -07003484
3485 validate_slab_cache(kmalloc_caches + 5);
3486 p = kzalloc(64, GFP_KERNEL);
3487 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3488 *p = 0x56;
3489 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3490 p);
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003491 printk(KERN_ERR
3492 "If allocated object is overwritten then not detectable\n\n");
Christoph Lameterb3459702007-05-09 02:32:41 -07003493 validate_slab_cache(kmalloc_caches + 6);
3494
3495 printk(KERN_ERR "\nB. Corruption after free\n");
3496 p = kzalloc(128, GFP_KERNEL);
3497 kfree(p);
3498 *p = 0x78;
3499 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3500 validate_slab_cache(kmalloc_caches + 7);
3501
3502 p = kzalloc(256, GFP_KERNEL);
3503 kfree(p);
3504 p[50] = 0x9a;
Ingo Molnar3adbefe2008-02-05 17:57:39 -08003505 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3506 p);
Christoph Lameterb3459702007-05-09 02:32:41 -07003507 validate_slab_cache(kmalloc_caches + 8);
3508
3509 p = kzalloc(512, GFP_KERNEL);
3510 kfree(p);
3511 p[512] = 0xab;
3512 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3513 validate_slab_cache(kmalloc_caches + 9);
3514}
3515#else
3516static void resiliency_test(void) {}
3517#endif
3518
Christoph Lameter88a420e2007-05-06 14:49:45 -07003519/*
Christoph Lameter672bba32007-05-09 02:32:39 -07003520 * Generate lists of code addresses where slabcache objects are allocated
Christoph Lameter88a420e2007-05-06 14:49:45 -07003521 * and freed.
3522 */
3523
3524struct location {
3525 unsigned long count;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003526 unsigned long addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003527 long long sum_time;
3528 long min_time;
3529 long max_time;
3530 long min_pid;
3531 long max_pid;
Rusty Russell174596a2009-01-01 10:12:29 +10303532 DECLARE_BITMAP(cpus, NR_CPUS);
Christoph Lameter45edfa52007-05-09 02:32:45 -07003533 nodemask_t nodes;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003534};
3535
3536struct loc_track {
3537 unsigned long max;
3538 unsigned long count;
3539 struct location *loc;
3540};
3541
3542static void free_loc_track(struct loc_track *t)
3543{
3544 if (t->max)
3545 free_pages((unsigned long)t->loc,
3546 get_order(sizeof(struct location) * t->max));
3547}
3548
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003549static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003550{
3551 struct location *l;
3552 int order;
3553
Christoph Lameter88a420e2007-05-06 14:49:45 -07003554 order = get_order(sizeof(struct location) * max);
3555
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003556 l = (void *)__get_free_pages(flags, order);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003557 if (!l)
3558 return 0;
3559
3560 if (t->count) {
3561 memcpy(l, t->loc, sizeof(struct location) * t->count);
3562 free_loc_track(t);
3563 }
3564 t->max = max;
3565 t->loc = l;
3566 return 1;
3567}
3568
3569static int add_location(struct loc_track *t, struct kmem_cache *s,
Christoph Lameter45edfa52007-05-09 02:32:45 -07003570 const struct track *track)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003571{
3572 long start, end, pos;
3573 struct location *l;
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003574 unsigned long caddr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003575 unsigned long age = jiffies - track->when;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003576
3577 start = -1;
3578 end = t->count;
3579
3580 for ( ; ; ) {
3581 pos = start + (end - start + 1) / 2;
3582
3583 /*
3584 * There is nothing at "end". If we end up there,
3585 * we need to insert the new element before end.
3586 */
3587 if (pos == end)
3588 break;
3589
3590 caddr = t->loc[pos].addr;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003591 if (track->addr == caddr) {
3592
3593 l = &t->loc[pos];
3594 l->count++;
3595 if (track->when) {
3596 l->sum_time += age;
3597 if (age < l->min_time)
3598 l->min_time = age;
3599 if (age > l->max_time)
3600 l->max_time = age;
3601
3602 if (track->pid < l->min_pid)
3603 l->min_pid = track->pid;
3604 if (track->pid > l->max_pid)
3605 l->max_pid = track->pid;
3606
Rusty Russell174596a2009-01-01 10:12:29 +10303607 cpumask_set_cpu(track->cpu,
3608 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07003609 }
3610 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003611 return 1;
3612 }
3613
Christoph Lameter45edfa52007-05-09 02:32:45 -07003614 if (track->addr < caddr)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003615 end = pos;
3616 else
3617 start = pos;
3618 }
3619
3620 /*
Christoph Lameter672bba32007-05-09 02:32:39 -07003621 * Not found. Insert new tracking element.
Christoph Lameter88a420e2007-05-06 14:49:45 -07003622 */
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003623 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
Christoph Lameter88a420e2007-05-06 14:49:45 -07003624 return 0;
3625
3626 l = t->loc + pos;
3627 if (pos < t->count)
3628 memmove(l + 1, l,
3629 (t->count - pos) * sizeof(struct location));
3630 t->count++;
3631 l->count = 1;
Christoph Lameter45edfa52007-05-09 02:32:45 -07003632 l->addr = track->addr;
3633 l->sum_time = age;
3634 l->min_time = age;
3635 l->max_time = age;
3636 l->min_pid = track->pid;
3637 l->max_pid = track->pid;
Rusty Russell174596a2009-01-01 10:12:29 +10303638 cpumask_clear(to_cpumask(l->cpus));
3639 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07003640 nodes_clear(l->nodes);
3641 node_set(page_to_nid(virt_to_page(track)), l->nodes);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003642 return 1;
3643}
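
/*
 * Illustrative sketch, not part of slub.c: a userspace model of the
 * binary search that add_location() uses to keep its table sorted by
 * address. On a hit the count is bumped; on a miss the loop converges
 * on the insertion point. Capacity checks are omitted in this toy.
 */
#include <stdio.h>
#include <string.h>

static unsigned long addrs[16] = { 100, 200, 300 };
static unsigned long counts[16] = { 1, 1, 1 };
static int nr = 3;

static void add_addr(unsigned long addr)
{
	long start = -1, end = nr, pos;

	for (;;) {
		pos = start + (end - start + 1) / 2;
		if (pos == end)
			break;			/* miss: insert at pos */
		if (addrs[pos] == addr) {
			counts[pos]++;		/* hit: existing entry */
			return;
		}
		if (addr < addrs[pos])
			end = pos;
		else
			start = pos;
	}
	memmove(&addrs[pos + 1], &addrs[pos], (nr - pos) * sizeof(addrs[0]));
	memmove(&counts[pos + 1], &counts[pos], (nr - pos) * sizeof(counts[0]));
	addrs[pos] = addr;
	counts[pos] = 1;
	nr++;
}

int main(void)
{
	int i;

	add_addr(200);	/* hit: bumps the existing entry */
	add_addr(250);	/* miss: inserted between 200 and 300 */
	for (i = 0; i < nr; i++)
		printf("%lu x%lu\n", addrs[i], counts[i]);
	return 0;
}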
3644
3645static void process_slab(struct loc_track *t, struct kmem_cache *s,
3646 struct page *page, enum track_item alloc)
3647{
Christoph Lametera973e9d2008-03-01 13:40:44 -08003648 void *addr = page_address(page);
Christoph Lameter39b26462008-04-14 19:11:30 +03003649 DECLARE_BITMAP(map, page->objects);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003650 void *p;
3651
Christoph Lameter39b26462008-04-14 19:11:30 +03003652 bitmap_zero(map, page->objects);
Christoph Lameter7656c722007-05-09 02:32:40 -07003653 for_each_free_object(p, s, page->freelist)
3654 set_bit(slab_index(p, s, addr), map);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003655
Christoph Lameter224a88b2008-04-14 19:11:31 +03003656 for_each_object(p, s, addr, page->objects)
Christoph Lameter45edfa52007-05-09 02:32:45 -07003657 if (!test_bit(slab_index(p, s, addr), map))
3658 add_location(t, s, get_track(s, p, alloc));
Christoph Lameter88a420e2007-05-06 14:49:45 -07003659}
3660
3661static int list_locations(struct kmem_cache *s, char *buf,
3662 enum track_item alloc)
3663{
Harvey Harrisone374d482008-01-31 15:20:50 -08003664 int len = 0;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003665 unsigned long i;
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003666 struct loc_track t = { 0, 0, NULL };
Christoph Lameter88a420e2007-05-06 14:49:45 -07003667 int node;
3668
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003669 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
Andrew Mortonea3061d2007-10-16 01:26:09 -07003670 GFP_TEMPORARY))
Christoph Lameter68dff6a2007-07-17 04:03:20 -07003671 return sprintf(buf, "Out of memory\n");
Christoph Lameter88a420e2007-05-06 14:49:45 -07003672
3673 /* Push back cpu slabs */
3674 flush_all(s);
3675
Christoph Lameterf64dc582007-10-16 01:25:33 -07003676 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lameter88a420e2007-05-06 14:49:45 -07003677 struct kmem_cache_node *n = get_node(s, node);
3678 unsigned long flags;
3679 struct page *page;
3680
Christoph Lameter9e869432007-08-22 14:01:56 -07003681 if (!atomic_long_read(&n->nr_slabs))
Christoph Lameter88a420e2007-05-06 14:49:45 -07003682 continue;
3683
3684 spin_lock_irqsave(&n->list_lock, flags);
3685 list_for_each_entry(page, &n->partial, lru)
3686 process_slab(&t, s, page, alloc);
3687 list_for_each_entry(page, &n->full, lru)
3688 process_slab(&t, s, page, alloc);
3689 spin_unlock_irqrestore(&n->list_lock, flags);
3690 }
3691
3692 for (i = 0; i < t.count; i++) {
Christoph Lameter45edfa52007-05-09 02:32:45 -07003693 struct location *l = &t.loc[i];
Christoph Lameter88a420e2007-05-06 14:49:45 -07003694
Hugh Dickins9c246242008-12-09 13:14:27 -08003695 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
Christoph Lameter88a420e2007-05-06 14:49:45 -07003696 break;
Harvey Harrisone374d482008-01-31 15:20:50 -08003697 len += sprintf(buf + len, "%7ld ", l->count);
Christoph Lameter45edfa52007-05-09 02:32:45 -07003698
3699 if (l->addr)
Harvey Harrisone374d482008-01-31 15:20:50 -08003700 len += sprint_symbol(buf + len, (unsigned long)l->addr);
Christoph Lameter88a420e2007-05-06 14:49:45 -07003701 else
Harvey Harrisone374d482008-01-31 15:20:50 -08003702 len += sprintf(buf + len, "<not-available>");
Christoph Lameter45edfa52007-05-09 02:32:45 -07003703
3704 if (l->sum_time != l->min_time) {
Harvey Harrisone374d482008-01-31 15:20:50 -08003705 len += sprintf(buf + len, " age=%ld/%ld/%ld",
Roman Zippelf8bd2252008-05-01 04:34:31 -07003706 l->min_time,
3707 (long)div_u64(l->sum_time, l->count),
3708 l->max_time);
Christoph Lameter45edfa52007-05-09 02:32:45 -07003709 } else
Harvey Harrisone374d482008-01-31 15:20:50 -08003710 len += sprintf(buf + len, " age=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07003711 l->min_time);
3712
3713 if (l->min_pid != l->max_pid)
Harvey Harrisone374d482008-01-31 15:20:50 -08003714 len += sprintf(buf + len, " pid=%ld-%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07003715 l->min_pid, l->max_pid);
3716 else
Harvey Harrisone374d482008-01-31 15:20:50 -08003717 len += sprintf(buf + len, " pid=%ld",
Christoph Lameter45edfa52007-05-09 02:32:45 -07003718 l->min_pid);
3719
Rusty Russell174596a2009-01-01 10:12:29 +10303720 if (num_online_cpus() > 1 &&
3721 !cpumask_empty(to_cpumask(l->cpus)) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08003722 len < PAGE_SIZE - 60) {
3723 len += sprintf(buf + len, " cpus=");
3724 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Rusty Russell174596a2009-01-01 10:12:29 +10303725 to_cpumask(l->cpus));
Christoph Lameter45edfa52007-05-09 02:32:45 -07003726 }
3727
Christoph Lameter62bc62a2009-06-16 15:32:15 -07003728 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
Harvey Harrisone374d482008-01-31 15:20:50 -08003729 len < PAGE_SIZE - 60) {
3730 len += sprintf(buf + len, " nodes=");
3731 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
Christoph Lameter45edfa52007-05-09 02:32:45 -07003732 l->nodes);
3733 }
3734
Harvey Harrisone374d482008-01-31 15:20:50 -08003735 len += sprintf(buf + len, "\n");
Christoph Lameter88a420e2007-05-06 14:49:45 -07003736 }
3737
3738 free_loc_track(&t);
3739 if (!t.count)
Harvey Harrisone374d482008-01-31 15:20:50 -08003740 len += sprintf(buf, "No data\n");
3741 return len;
Christoph Lameter88a420e2007-05-06 14:49:45 -07003742}
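
/*
 * Illustrative sketch, not part of slub.c: list_locations() is what
 * backs the per-cache alloc_calls/free_calls sysfs files when
 * SLAB_STORE_USER is enabled. Assuming the conventional
 * /sys/kernel/slab layout, a userspace reader is just:
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/alloc_calls", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "count symbol age=... pid=..." */
	fclose(f);
	return 0;
}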
3743
Christoph Lameter81819f02007-05-06 14:49:36 -07003744enum slab_stat_type {
Christoph Lameter205ab992008-04-14 19:11:40 +03003745 SL_ALL, /* All slabs */
3746 SL_PARTIAL, /* Only partially allocated slabs */
3747 SL_CPU, /* Only slabs used for cpu caches */
3748 SL_OBJECTS, /* Determine allocated objects not slabs */
3749 SL_TOTAL /* Determine object capacity not slabs */
Christoph Lameter81819f02007-05-06 14:49:36 -07003750};
3751
Christoph Lameter205ab992008-04-14 19:11:40 +03003752#define SO_ALL (1 << SL_ALL)
Christoph Lameter81819f02007-05-06 14:49:36 -07003753#define SO_PARTIAL (1 << SL_PARTIAL)
3754#define SO_CPU (1 << SL_CPU)
3755#define SO_OBJECTS (1 << SL_OBJECTS)
Christoph Lameter205ab992008-04-14 19:11:40 +03003756#define SO_TOTAL (1 << SL_TOTAL)

static ssize_t show_slab_objects(struct kmem_cache *s,
			    char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int node;
	int x;
	unsigned long *nodes;
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
	per_cpu = nodes + nr_node_ids;

	if (flags & SO_CPU) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

			if (!c || c->node < 0)
				continue;

			if (c->page) {
				if (flags & SO_TOTAL)
					x = c->page->objects;
				else if (flags & SO_OBJECTS)
					x = c->page->inuse;
				else
					x = 1;

				total += x;
				nodes[c->node] += x;
			}
			per_cpu[c->node]++;
		}
	}

	if (flags & SO_ALL) {
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node);

			if (flags & SO_TOTAL)
				x = atomic_long_read(&n->total_objects);
			else if (flags & SO_OBJECTS)
				x = atomic_long_read(&n->total_objects) -
					count_partial(n, count_free);
			else
				x = atomic_long_read(&n->nr_slabs);
			total += x;
			nodes[node] += x;
		}

	} else if (flags & SO_PARTIAL) {
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node);

			if (flags & SO_TOTAL)
				x = count_partial(n, count_total);
			else if (flags & SO_OBJECTS)
				x = count_partial(n, count_inuse);
			else
				x = n->nr_partial;
			total += x;
			nodes[node] += x;
		}
	}
	x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA
	for_each_node_state(node, N_NORMAL_MEMORY)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
					node, nodes[node]);
#endif
	kfree(nodes);
	return x + sprintf(buf + x, "\n");
}
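
/*
 * The buffer produced above holds the grand total followed by optional
 * per node counts. An illustrative result for a two node machine:
 *
 *	8192 N0=4096 N1=4096
 */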

static int any_slab_objects(struct kmem_cache *s)
{
	int node;

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		if (atomic_long_read(&n->total_objects))
			return 1;
	}
	return 0;
}

#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = __ATTR_RO(_name)

#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)
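
/*
 * As a sketch of what the macros generate, SLAB_ATTR(order) expands to
 *
 *	static struct slab_attribute order_attr =
 *		__ATTR(order, 0644, order_show, order_store)
 *
 * so each attribute only needs its _show (and, for writable attributes,
 * _store) function defined before the macro is invoked.
 */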

static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", oo_objects(s->oo));
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned long order;
	int err;

	err = strict_strtoul(buf, 10, &order);
	if (err)
		return err;

	if (order > slub_max_order || order < slub_min_order)
		return -EINVAL;

	calculate_sizes(s, order);
	return length;
}

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", oo_order(s->oo));
}
SLAB_ATTR(order);

static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%lu\n", s->min_partial);
}

static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned long min;
	int err;

	err = strict_strtoul(buf, 10, &min);
	if (err)
		return err;

	set_min_partial(s, min);
	return length;
}
SLAB_ATTR(min_partial);
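
/*
 * Attributes created with SLAB_ATTR() are writable at runtime through
 * sysfs. A hypothetical shell session (cache name and values are
 * illustrative only):
 *
 *	# cat /sys/kernel/slab/kmalloc-64/min_partial
 *	5
 *	# echo 8 > /sys/kernel/slab/kmalloc-64/min_partial
 */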

static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
	if (s->ctor) {
		int n = sprint_symbol(buf, (unsigned long)s->ctor);

		return n + sprintf(buf + n, "\n");
	}
	return 0;
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases);

static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);

static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects_partial);

static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects);

static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_DEBUG_FREE;
	if (buf[0] == '1')
		s->flags |= SLAB_DEBUG_FREE;
	return length;
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1')
		s->flags |= SLAB_TRACE;
	return length;
}
SLAB_ATTR(trace);

static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
	if (buf[0] == '1')
		s->flags |= SLAB_RECLAIM_ACCOUNT;
	return length;
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1')
		s->flags |= SLAB_RED_ZONE;
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1')
		s->flags |= SLAB_POISON;
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1')
		s->flags |= SLAB_STORE_USER;
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(store_user);

static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1') {
		int rc = kmem_cache_shrink(s);

		if (rc)
			return rc;
	} else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);

static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);

#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned long ratio;
	int err;

	err = strict_strtoul(buf, 10, &ratio);
	if (err)
		return err;

	if (ratio <= 100)
		s->remote_node_defrag_ratio = ratio * 10;

	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif

#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
	}
#endif
	kfree(data);
	return len + sprintf(buf + len, "\n");
}
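
/*
 * The buffer written above holds the sum across cpus followed, on SMP,
 * by the nonzero per cpu counts. Illustrative values only:
 *
 *	94203 C0=23994 C1=23089 C2=24791 C3=22329
 */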

static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}

#define STAT_ATTR(si, text)					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text)

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
#endif

static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&order_fallback_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (!(s->flags & SLAB_NOTRACK))
		*p++ = 't';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
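
/*
 * As an example of the id produced: a 192 byte DMA cache with sanity
 * checks enabled and kmemcheck tracking not disabled would be named
 * ":dFt-0000192" (the size is zero padded to seven digits by the %07d
 * above). The values are illustrative.
 */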

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err) {
		kobject_del(&s->kobj);
		kobject_put(&s->kobj);
		return err;
	}
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}
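
/*
 * After a successful sysfs_slab_add() the cache appears under
 * /sys/kernel/slab/. For a merged cache the directory carries the
 * unique id and the cache name is a symlink created by
 * sysfs_slab_alias(), e.g. (names illustrative):
 *
 *	/sys/kernel/slab/:t-0000064
 *	/sys/kernel/slab/kmalloc-64 -> :t-0000064
 */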

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}
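
/*
 * A made-up line matching the format strings above; SLUB reports the
 * tunables and sharedavail fields as zero:
 *
 * kmalloc-64          2980   3072     64   64    1 : tunables    0    0    0 : slabdata     48     48      0
 */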

static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */