/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 * (c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * (c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *    are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

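/*
 * Illustrative sketch of the client-side usage pattern described above; it is
 * not part of the allocator itself, and "struct foo"/foo_ctor() are
 * hypothetical names:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor);
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);   (object returned in its ctor state)
 *	kmem_cache_destroy(foo_cachep);     (caller prevents concurrent allocs)
 */
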
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/kmemtrace.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define DEBUG		1
#define STATS		1
#define FORCED_DEBUG	1
#else
#define DEBUG		0
#define STATS		0
#define FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD		sizeof(void *)
#define REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab, as
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
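
/*
 * Illustrative sketch of the free-list encoding described above (not extra
 * functionality): the kmem_bufctl_t array follows struct slab, and free
 * objects are chained by index.  For example, in a 4-object slab where only
 * objects 1 and 3 are free, one plausible state is:
 *
 *	slabp->free == 1		first free object
 *	bufctl[1]   == 3		next free object after object 1
 *	bufctl[3]   == BUFCTL_END	end of the free list
 *
 * Allocated objects (0 and 2 here) are simply not on the chain.
 */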

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
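
/*
 * Illustrative sketch (mirroring the pattern used later in this file) of how
 * entry[] behaves as a LIFO stack of object pointers:
 *
 *	free:  ac->entry[ac->avail++] = objp;	push, most recently freed on top
 *	alloc: objp = ac->entry[--ac->avail];	pop the cache-warm object first
 */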

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <=x) \
		return i; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}
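
/*
 * Illustrative example, assuming the size list generated from
 * kmalloc_sizes.h begins 32, 64, 96, 128, ...: index_of(100) expands into a
 * chain of constant comparisons that the compiler folds to the constant 3
 * (the 128-byte cache).  A non-constant size leaves a call to the undefined
 * __bad_size(), which then fails at link time.
 */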

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
	} while (0)

#define MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define STATS_INC_ACTIVE(x)	((x)->num_active++)
#define STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define STATS_INC_GROWN(x)	((x)->grown++)
#define STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define STATS_INC_ERR(x)	((x)->errors++)
#define STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define STATS_INC_ACTIVE(x)	do { } while (0)
#define STATS_DEC_ACTIVE(x)	do { } while (0)
#define STATS_INC_ALLOCED(x)	do { } while (0)
#define STATS_INC_GROWN(x)	do { } while (0)
#define STATS_ADD_REAPED(x,y)	do { } while (0)
#define STATS_SET_HIGH(x)	do { } while (0)
#define STATS_INC_ERR(x)	do { } while (0)
#define STATS_INC_NODEALLOCS(x)	do { } while (0)
#define STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->buffer_size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->buffer_size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_TRACING
size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return cachep->buffer_size;
}
EXPORT_SYMBOL(slab_buffer_size);
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define BREAK_GFP_ORDER_HI	1
#define BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page_get_slab(page);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->buffer_size)
 *   Using the fact that buffer_size is a constant for a particular cache,
 *   we can replace (offset / cache->buffer_size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
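
/*
 * Worked example (illustrative): for a cache with buffer_size == 256,
 * reciprocal_buffer_size holds reciprocal_value(256), so an object located
 * at slab->s_mem + 1280 gives
 *
 *	obj_to_index(cache, slab, obj) == reciprocal_divide(1280, ...) == 5
 *
 * i.e. the same result as 1280 / 256, without a runtime division.
 */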

/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	EARLY,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up >= EARLY;
}

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static void init_node_lock_keys(int q)
{
	struct cache_sizes *s = malloc_sizes;

	if (g_cpucache_up != FULL)
		return;

	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
		struct array_cache **alc;
		struct kmem_list3 *l3;
		int r;

		l3 = s->cs_cachep->nodelists[q];
		if (!l3 || OFF_SLAB(s->cs_cachep))
			continue;
		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
		alc = l3->alien;
		/*
		 * FIXME: This check for BAD_ALIEN_MAGIC
		 * should go away when common slab code is taught to
		 * work even without alien caches.
		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
		 * for alloc_alien_cache,
		 */
		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
			continue;
		for_each_node(r) {
			if (alc[r])
				lockdep_set_class(&alc[r]->lock,
						  &on_slab_alc_key);
		}
	}
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}
#endif

/*
 * Guard access to the cache-chain.
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	if (!size)
		return ZERO_SIZE_PTR;

	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}
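
/*
 * Illustrative example, assuming the default size list from kmalloc_sizes.h
 * (..., 64, 96, 128, ...): __find_general_cachep(100, GFP_KERNEL) walks
 * malloc_sizes[] to the first entry with cs_size >= 100 and returns the
 * "size-128" cache; with GFP_DMA set (and CONFIG_ZONE_DMA) it would return
 * the matching cs_dmacachep instead.
 */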
736
Adrian Bunkb2213852006-09-25 23:31:02 -0700737static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
Manfred Spraul97e2bde2005-05-01 08:58:38 -0700738{
739 return __find_general_cachep(size, gfpflags);
740}
Manfred Spraul97e2bde2005-05-01 08:58:38 -0700741
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800742static size_t slab_mgmt_size(size_t nr_objs, size_t align)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743{
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800744 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
745}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746
Andrew Mortona737b3e2006-03-22 00:08:11 -0800747/*
748 * Calculate the number of objects and left-over bytes for a given buffer size.
749 */
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800750static void cache_estimate(unsigned long gfporder, size_t buffer_size,
751 size_t align, int flags, size_t *left_over,
752 unsigned int *num)
753{
754 int nr_objs;
755 size_t mgmt_size;
756 size_t slab_size = PAGE_SIZE << gfporder;
757
758 /*
759 * The slab management structure can be either off the slab or
760 * on it. For the latter case, the memory allocated for a
761 * slab is used for:
762 *
763 * - The struct slab
764 * - One kmem_bufctl_t for each object
765 * - Padding to respect alignment of @align
766 * - @buffer_size bytes for each object
767 *
768 * If the slab management structure is off the slab, then the
769 * alignment will already be calculated into the size. Because
770 * the slabs are all pages aligned, the objects will be at the
771 * correct alignment when allocated.
772 */
773 if (flags & CFLGS_OFF_SLAB) {
774 mgmt_size = 0;
775 nr_objs = slab_size / buffer_size;
776
777 if (nr_objs > SLAB_LIMIT)
778 nr_objs = SLAB_LIMIT;
779 } else {
780 /*
781 * Ignore padding for the initial guess. The padding
782 * is at most @align-1 bytes, and @buffer_size is at
783 * least @align. In the worst case, this result will
784 * be one greater than the number of objects that fit
785 * into the memory allocation when taking the padding
786 * into account.
787 */
788 nr_objs = (slab_size - sizeof(struct slab)) /
789 (buffer_size + sizeof(kmem_bufctl_t));
790
791 /*
792 * This calculated number will be either the right
793 * amount, or one greater than what we want.
794 */
795 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
796 > slab_size)
797 nr_objs--;
798
799 if (nr_objs > SLAB_LIMIT)
800 nr_objs = SLAB_LIMIT;
801
802 mgmt_size = slab_mgmt_size(nr_objs, align);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803 }
Steven Rostedtfbaccac2006-02-01 03:05:45 -0800804 *num = nr_objs;
805 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806}
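
/*
 * Worked example (illustrative, assuming a 64-bit build with
 * sizeof(struct slab) == 48, sizeof(kmem_bufctl_t) == 4 and align == 8):
 * for gfporder == 0 (4096-byte slab) and buffer_size == 256, the initial
 * guess is (4096 - 48) / (256 + 4) = 15 objects; the management area is
 * ALIGN(48 + 15*4, 8) = 112 bytes, and 112 + 15*256 = 3952 <= 4096 so the
 * guess stands, giving *num = 15 and *left_over = 4096 - 3840 - 112 = 144
 * bytes (later usable for cache colouring).
 */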

#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(slab_reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
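
/*
 * Illustrative use (mirroring callers later in this file): draining a per-cpu
 * array into a node's shared array moves up to the smaller of the available
 * objects and the remaining room, e.g.
 *
 *	moved = transfer_objects(l3->shared, ac, ac->limit);
 *
 * leaving any surplus to be handed back with free_block().
 */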
940
Christoph Lameter765c4502006-09-27 01:50:08 -0700941#ifndef CONFIG_NUMA
942
943#define drain_alien_cache(cachep, alien) do { } while (0)
944#define reap_alien(cachep, l3) do { } while (0)
945
Pekka Enberg83b519e2009-06-10 19:40:04 +0300946static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
Christoph Lameter765c4502006-09-27 01:50:08 -0700947{
948 return (struct array_cache **)BAD_ALIEN_MAGIC;
949}
950
951static inline void free_alien_cache(struct array_cache **ac_ptr)
952{
953}
954
955static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
956{
957 return 0;
958}
959
960static inline void *alternate_node_alloc(struct kmem_cache *cachep,
961 gfp_t flags)
962{
963 return NULL;
964}
965
Christoph Hellwig8b98c162006-12-06 20:32:30 -0800966static inline void *____cache_alloc_node(struct kmem_cache *cachep,
Christoph Lameter765c4502006-09-27 01:50:08 -0700967 gfp_t flags, int nodeid)
968{
969 return NULL;
970}
971
972#else /* CONFIG_NUMA */
973
Christoph Hellwig8b98c162006-12-06 20:32:30 -0800974static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
Paul Jacksonc61afb12006-03-24 03:16:08 -0800975static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
Christoph Lameterdc85da12006-01-18 17:42:36 -0800976
Pekka Enberg83b519e2009-06-10 19:40:04 +0300977static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
Christoph Lametere498be72005-09-09 13:03:32 -0700978{
979 struct array_cache **ac_ptr;
Christoph Lameter8ef82862007-02-20 13:57:52 -0800980 int memsize = sizeof(void *) * nr_node_ids;
Christoph Lametere498be72005-09-09 13:03:32 -0700981 int i;
982
983 if (limit > 1)
984 limit = 12;
Haicheng Lif3186a92010-01-06 15:25:23 +0800985 ac_ptr = kzalloc_node(memsize, gfp, node);
Christoph Lametere498be72005-09-09 13:03:32 -0700986 if (ac_ptr) {
987 for_each_node(i) {
Haicheng Lif3186a92010-01-06 15:25:23 +0800988 if (i == node || !node_online(i))
Christoph Lametere498be72005-09-09 13:03:32 -0700989 continue;
Pekka Enberg83b519e2009-06-10 19:40:04 +0300990 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
Christoph Lametere498be72005-09-09 13:03:32 -0700991 if (!ac_ptr[i]) {
Akinobu Mitacc550de2007-11-14 16:58:35 -0800992 for (i--; i >= 0; i--)
Christoph Lametere498be72005-09-09 13:03:32 -0700993 kfree(ac_ptr[i]);
994 kfree(ac_ptr);
995 return NULL;
996 }
997 }
998 }
999 return ac_ptr;
1000}
1001
Pekka Enberg5295a742006-02-01 03:05:48 -08001002static void free_alien_cache(struct array_cache **ac_ptr)
Christoph Lametere498be72005-09-09 13:03:32 -07001003{
1004 int i;
1005
1006 if (!ac_ptr)
1007 return;
Christoph Lametere498be72005-09-09 13:03:32 -07001008 for_each_node(i)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001009 kfree(ac_ptr[i]);
Christoph Lametere498be72005-09-09 13:03:32 -07001010 kfree(ac_ptr);
1011}
1012
Pekka Enberg343e0d72006-02-01 03:05:50 -08001013static void __drain_alien_cache(struct kmem_cache *cachep,
Pekka Enberg5295a742006-02-01 03:05:48 -08001014 struct array_cache *ac, int node)
Christoph Lametere498be72005-09-09 13:03:32 -07001015{
1016 struct kmem_list3 *rl3 = cachep->nodelists[node];
1017
1018 if (ac->avail) {
1019 spin_lock(&rl3->list_lock);
Christoph Lametere00946f2006-03-25 03:06:45 -08001020 /*
1021 * Stuff objects into the remote nodes shared array first.
1022 * That way we could avoid the overhead of putting the objects
1023 * into the free lists and getting them back later.
1024 */
shin, jacob693f7d32006-04-28 10:54:37 -05001025 if (rl3->shared)
1026 transfer_objects(rl3->shared, ac, ac->limit);
Christoph Lametere00946f2006-03-25 03:06:45 -08001027
Christoph Lameterff694162005-09-22 21:44:02 -07001028 free_block(cachep, ac->entry, ac->avail, node);
Christoph Lametere498be72005-09-09 13:03:32 -07001029 ac->avail = 0;
1030 spin_unlock(&rl3->list_lock);
1031 }
1032}
1033
Christoph Lameter8fce4d82006-03-09 17:33:54 -08001034/*
1035 * Called from cache_reap() to regularly drain alien caches round robin.
1036 */
1037static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1038{
Tejun Heo1871e522009-10-29 22:34:13 +09001039 int node = __get_cpu_var(slab_reap_node);
Christoph Lameter8fce4d82006-03-09 17:33:54 -08001040
1041 if (l3->alien) {
1042 struct array_cache *ac = l3->alien[node];
Christoph Lametere00946f2006-03-25 03:06:45 -08001043
1044 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
Christoph Lameter8fce4d82006-03-09 17:33:54 -08001045 __drain_alien_cache(cachep, ac, node);
1046 spin_unlock_irq(&ac->lock);
1047 }
1048 }
1049}
1050
Andrew Mortona737b3e2006-03-22 00:08:11 -08001051static void drain_alien_cache(struct kmem_cache *cachep,
1052 struct array_cache **alien)
Christoph Lametere498be72005-09-09 13:03:32 -07001053{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001054 int i = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07001055 struct array_cache *ac;
1056 unsigned long flags;
1057
1058 for_each_online_node(i) {
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001059 ac = alien[i];
Christoph Lametere498be72005-09-09 13:03:32 -07001060 if (ac) {
1061 spin_lock_irqsave(&ac->lock, flags);
1062 __drain_alien_cache(cachep, ac, i);
1063 spin_unlock_irqrestore(&ac->lock, flags);
1064 }
1065 }
1066}
Pekka Enberg729bd0b2006-06-23 02:03:05 -07001067
Ingo Molnar873623d2006-07-13 14:44:38 +02001068static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
Pekka Enberg729bd0b2006-06-23 02:03:05 -07001069{
1070 struct slab *slabp = virt_to_slab(objp);
1071 int nodeid = slabp->nodeid;
1072 struct kmem_list3 *l3;
1073 struct array_cache *alien = NULL;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001074 int node;
1075
1076 node = numa_node_id();
Pekka Enberg729bd0b2006-06-23 02:03:05 -07001077
1078 /*
1079 * Make sure we are not freeing a object from another node to the array
1080 * cache on this cpu.
1081 */
Siddha, Suresh B62918a02007-05-02 19:27:18 +02001082 if (likely(slabp->nodeid == node))
Pekka Enberg729bd0b2006-06-23 02:03:05 -07001083 return 0;
1084
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001085 l3 = cachep->nodelists[node];
Pekka Enberg729bd0b2006-06-23 02:03:05 -07001086 STATS_INC_NODEFREES(cachep);
1087 if (l3->alien && l3->alien[nodeid]) {
1088 alien = l3->alien[nodeid];
Ingo Molnar873623d2006-07-13 14:44:38 +02001089 spin_lock(&alien->lock);
Pekka Enberg729bd0b2006-06-23 02:03:05 -07001090 if (unlikely(alien->avail == alien->limit)) {
1091 STATS_INC_ACOVERFLOW(cachep);
1092 __drain_alien_cache(cachep, alien, nodeid);
1093 }
1094 alien->entry[alien->avail++] = objp;
1095 spin_unlock(&alien->lock);
1096 } else {
1097 spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1098 free_block(cachep, &objp, 1, nodeid);
1099 spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1100 }
1101 return 1;
1102}
Christoph Lametere498be72005-09-09 13:03:32 -07001103#endif
1104
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001105static void __cpuinit cpuup_canceled(long cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106{
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001107 struct kmem_cache *cachep;
1108 struct kmem_list3 *l3 = NULL;
1109 int node = cpu_to_node(cpu);
Rusty Russella70f7302009-03-13 14:49:46 +10301110 const struct cpumask *mask = cpumask_of_node(node);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001111
1112 list_for_each_entry(cachep, &cache_chain, next) {
1113 struct array_cache *nc;
1114 struct array_cache *shared;
1115 struct array_cache **alien;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001116
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001117 /* cpu is dead; no one can alloc from it. */
1118 nc = cachep->array[cpu];
1119 cachep->array[cpu] = NULL;
1120 l3 = cachep->nodelists[node];
1121
1122 if (!l3)
1123 goto free_array_cache;
1124
1125 spin_lock_irq(&l3->list_lock);
1126
1127 /* Free limit for this kmem_list3 */
1128 l3->free_limit -= cachep->batchcount;
1129 if (nc)
1130 free_block(cachep, nc->entry, nc->avail, node);
1131
Rusty Russell58463c12009-12-17 11:43:12 -06001132 if (!cpumask_empty(mask)) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001133 spin_unlock_irq(&l3->list_lock);
1134 goto free_array_cache;
1135 }
1136
1137 shared = l3->shared;
1138 if (shared) {
1139 free_block(cachep, shared->entry,
1140 shared->avail, node);
1141 l3->shared = NULL;
1142 }
1143
1144 alien = l3->alien;
1145 l3->alien = NULL;
1146
1147 spin_unlock_irq(&l3->list_lock);
1148
1149 kfree(shared);
1150 if (alien) {
1151 drain_alien_cache(cachep, alien);
1152 free_alien_cache(alien);
1153 }
1154free_array_cache:
1155 kfree(nc);
1156 }
1157 /*
1158 * In the previous loop, all the objects were freed to
1159 * the respective cache's slabs, now we can go ahead and
1160 * shrink each nodelist to its limit.
1161 */
1162 list_for_each_entry(cachep, &cache_chain, next) {
1163 l3 = cachep->nodelists[node];
1164 if (!l3)
1165 continue;
1166 drain_freelist(cachep, l3, l3->free_objects);
1167 }
1168}
1169
1170static int __cpuinit cpuup_prepare(long cpu)
1171{
Pekka Enberg343e0d72006-02-01 03:05:50 -08001172 struct kmem_cache *cachep;
Christoph Lametere498be72005-09-09 13:03:32 -07001173 struct kmem_list3 *l3 = NULL;
1174 int node = cpu_to_node(cpu);
David Howellsea02e3d2007-07-19 01:49:09 -07001175 const int memsize = sizeof(struct kmem_list3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001177 /*
1178 * We need to do this right in the beginning since
1179 * alloc_arraycache's are going to use this list.
1180 * kmalloc_node allows us to add the slab to the right
1181 * kmem_list3 and not this cpu's kmem_list3
1182 */
1183
1184 list_for_each_entry(cachep, &cache_chain, next) {
1185 /*
1186 * Set up the size64 kmemlist for cpu before we can
1187 * begin anything. Make sure some other cpu on this
1188 * node has not already allocated this
1189 */
1190 if (!cachep->nodelists[node]) {
1191 l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1192 if (!l3)
1193 goto bad;
1194 kmem_list3_init(l3);
1195 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1196 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1197
1198 /*
1199 * The l3s don't come and go as CPUs come and
1200 * go. cache_chain_mutex is sufficient
1201 * protection here.
1202 */
1203 cachep->nodelists[node] = l3;
1204 }
1205
1206 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1207 cachep->nodelists[node]->free_limit =
1208 (1 + nr_cpus_node(node)) *
1209 cachep->batchcount + cachep->num;
1210 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1211 }
1212
1213 /*
1214 * Now we can go ahead with allocating the shared arrays and
1215 * array caches
1216 */
1217 list_for_each_entry(cachep, &cache_chain, next) {
1218 struct array_cache *nc;
1219 struct array_cache *shared = NULL;
1220 struct array_cache **alien = NULL;
1221
1222 nc = alloc_arraycache(node, cachep->limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03001223 cachep->batchcount, GFP_KERNEL);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001224 if (!nc)
1225 goto bad;
1226 if (cachep->shared) {
1227 shared = alloc_arraycache(node,
1228 cachep->shared * cachep->batchcount,
Pekka Enberg83b519e2009-06-10 19:40:04 +03001229 0xbaadf00d, GFP_KERNEL);
Akinobu Mita12d00f62007-10-18 03:05:11 -07001230 if (!shared) {
1231 kfree(nc);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001232 goto bad;
Akinobu Mita12d00f62007-10-18 03:05:11 -07001233 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001234 }
1235 if (use_alien_caches) {
Pekka Enberg83b519e2009-06-10 19:40:04 +03001236 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
Akinobu Mita12d00f62007-10-18 03:05:11 -07001237 if (!alien) {
1238 kfree(shared);
1239 kfree(nc);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001240 goto bad;
Akinobu Mita12d00f62007-10-18 03:05:11 -07001241 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001242 }
1243 cachep->array[cpu] = nc;
1244 l3 = cachep->nodelists[node];
1245 BUG_ON(!l3);
1246
1247 spin_lock_irq(&l3->list_lock);
1248 if (!l3->shared) {
1249 /*
1250 * We are serialised from CPU_DEAD or
1251 * CPU_UP_CANCELLED by the cpucontrol lock
1252 */
1253 l3->shared = shared;
1254 shared = NULL;
1255 }
1256#ifdef CONFIG_NUMA
1257 if (!l3->alien) {
1258 l3->alien = alien;
1259 alien = NULL;
1260 }
1261#endif
1262 spin_unlock_irq(&l3->list_lock);
1263 kfree(shared);
1264 free_alien_cache(alien);
1265 }
Pekka Enbergce79ddc2009-11-23 22:01:15 +02001266 init_node_lock_keys(node);
1267
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001268 return 0;
1269bad:
Akinobu Mita12d00f62007-10-18 03:05:11 -07001270 cpuup_canceled(cpu);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001271 return -ENOMEM;
1272}
1273
1274static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1275 unsigned long action, void *hcpu)
1276{
1277 long cpu = (long)hcpu;
1278 int err = 0;
1279
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 switch (action) {
Heiko Carstens38c3bd92007-05-09 02:34:05 -07001281 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001282 case CPU_UP_PREPARE_FROZEN:
Gautham R Shenoy95402b32008-01-25 21:08:02 +01001283 mutex_lock(&cache_chain_mutex);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001284 err = cpuup_prepare(cpu);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01001285 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 break;
1287 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001288 case CPU_ONLINE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 start_cpu_timer(cpu);
1290 break;
1291#ifdef CONFIG_HOTPLUG_CPU
Christoph Lameter5830c592007-05-09 02:34:22 -07001292 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001293 case CPU_DOWN_PREPARE_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001294 /*
1295	 * Shut down the cache reaper. Note that the cache_chain_mutex is
1296 * held so that if cache_reap() is invoked it cannot do
1297 * anything expensive but will only modify reap_work
1298 * and reschedule the timer.
1299 */
Tejun Heo1871e522009-10-29 22:34:13 +09001300 cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
Christoph Lameter5830c592007-05-09 02:34:22 -07001301 /* Now the cache_reaper is guaranteed to be not running. */
Tejun Heo1871e522009-10-29 22:34:13 +09001302 per_cpu(slab_reap_work, cpu).work.func = NULL;
Christoph Lameter5830c592007-05-09 02:34:22 -07001303 break;
1304 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001305 case CPU_DOWN_FAILED_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001306 start_cpu_timer(cpu);
1307 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001309 case CPU_DEAD_FROZEN:
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001310 /*
1311 * Even if all the cpus of a node are down, we don't free the
1312	 * kmem_list3 of any cache. This is to avoid a race between
1313	 * cpu_down and a kmalloc allocation from another cpu for
1314 * memory from the node of the cpu going down. The list3
1315 * structure is usually allocated from kmem_cache_create() and
1316 * gets destroyed at kmem_cache_destroy().
1317 */
Simon Arlott183ff222007-10-20 01:27:18 +02001318 /* fall through */
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08001319#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001321 case CPU_UP_CANCELED_FROZEN:
Gautham R Shenoy95402b32008-01-25 21:08:02 +01001322 mutex_lock(&cache_chain_mutex);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001323 cpuup_canceled(cpu);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001324 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001327 return err ? NOTIFY_BAD : NOTIFY_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328}
1329
Chandra Seetharaman74b85f32006-06-27 02:54:09 -07001330static struct notifier_block __cpuinitdata cpucache_notifier = {
1331 &cpuup_callback, NULL, 0
1332};
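/*
 * Editor's sketch, not part of the kernel build: the notifier block above is
 * the standard way a subsystem hooks CPU hotplug.  A hypothetical "foo"
 * subsystem would wire up its own callback the same way; foo_cpu_callback and
 * foo_notifier are invented names used only for illustration.
 */
#if 0
static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* allocate per-cpu state for 'cpu' here */
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* and tear it down again here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata foo_notifier = {
	&foo_cpu_callback, NULL, 0
};
/* registered from an init path with register_cpu_notifier(&foo_notifier) */
#endif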
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
Christoph Lametere498be72005-09-09 13:03:32 -07001334/*
1335 * Swap the static kmem_list3 with kmalloced memory.
1336 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001337static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1338 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07001339{
1340 struct kmem_list3 *ptr;
1341
Pekka Enberg83b519e2009-06-10 19:40:04 +03001342 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07001343 BUG_ON(!ptr);
1344
Christoph Lametere498be72005-09-09 13:03:32 -07001345 memcpy(ptr, list, sizeof(struct kmem_list3));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001346 /*
1347 * Do not assume that spinlocks can be initialized via memcpy:
1348 */
1349 spin_lock_init(&ptr->list_lock);
1350
Christoph Lametere498be72005-09-09 13:03:32 -07001351 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1352 cachep->nodelists[nodeid] = ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001353}
1354
Andrew Mortona737b3e2006-03-22 00:08:11 -08001355/*
Pekka Enberg556a1692008-01-25 08:20:51 +02001356 * For setting up all the kmem_list3s for a cache whose buffer_size is the
1357 * same as the size of struct kmem_list3.
1358 */
1359static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1360{
1361 int node;
1362
1363 for_each_online_node(node) {
1364 cachep->nodelists[node] = &initkmem_list3[index + node];
1365 cachep->nodelists[node]->next_reap = jiffies +
1366 REAPTIMEOUT_LIST3 +
1367 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1368 }
1369}
1370
1371/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08001372 * Initialisation. Called after the page allocator has been initialised and
1373 * before smp_init().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 */
1375void __init kmem_cache_init(void)
1376{
1377 size_t left_over;
1378 struct cache_sizes *sizes;
1379 struct cache_names *names;
Christoph Lametere498be72005-09-09 13:03:32 -07001380 int i;
Jack Steiner07ed76b2006-03-07 21:55:46 -08001381 int order;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001382 int node;
Christoph Lametere498be72005-09-09 13:03:32 -07001383
Mel Gormanb6e68bc2009-06-16 15:32:16 -07001384 if (num_possible_nodes() == 1)
Siddha, Suresh B62918a02007-05-02 19:27:18 +02001385 use_alien_caches = 0;
1386
Christoph Lametere498be72005-09-09 13:03:32 -07001387 for (i = 0; i < NUM_INIT_LISTS; i++) {
1388 kmem_list3_init(&initkmem_list3[i]);
1389 if (i < MAX_NUMNODES)
1390 cache_cache.nodelists[i] = NULL;
1391 }
Pekka Enberg556a1692008-01-25 08:20:51 +02001392 set_up_list3s(&cache_cache, CACHE_CACHE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393
1394 /*
1395 * Fragmentation resistance on low memory - only use bigger
1396 * page orders on machines with more than 32MB of memory.
1397 */
Jan Beulich44813742009-09-21 17:03:05 -07001398 if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1400
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 /* Bootstrap is tricky, because several objects are allocated
1402 * from caches that do not exist yet:
Andrew Mortona737b3e2006-03-22 00:08:11 -08001403 * 1) initialize the cache_cache cache: it contains the struct
1404 * kmem_cache structures of all caches, except cache_cache itself:
1405 * cache_cache is statically allocated.
Christoph Lametere498be72005-09-09 13:03:32 -07001406 * Initially an __init data area is used for the head array and the
1407 * kmem_list3 structures, it's replaced with a kmalloc allocated
1408 * array at the end of the bootstrap.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 * 2) Create the first kmalloc cache.
Pekka Enberg343e0d72006-02-01 03:05:50 -08001410 * The struct kmem_cache for the new cache is allocated normally.
Christoph Lametere498be72005-09-09 13:03:32 -07001411 * An __init data area is used for the head array.
1412 * 3) Create the remaining kmalloc caches, with minimally sized
1413 * head arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 * 4) Replace the __init data head arrays for cache_cache and the first
1415 * kmalloc cache with kmalloc allocated arrays.
Christoph Lametere498be72005-09-09 13:03:32 -07001416 * 5) Replace the __init data for kmem_list3 for cache_cache and
1417 * the other caches with kmalloc allocated memory.
1418 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 */
1420
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001421 node = numa_node_id();
1422
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 /* 1) create the cache_cache */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 INIT_LIST_HEAD(&cache_chain);
1425 list_add(&cache_cache.next, &cache_chain);
1426 cache_cache.colour_off = cache_line_size();
1427 cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
Daniel Yeisleyec1f5ee2008-03-25 23:59:08 +02001428 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
Eric Dumazet8da34302007-05-06 14:49:29 -07001430 /*
1431 * struct kmem_cache size depends on nr_node_ids, which
1432 * can be less than MAX_NUMNODES.
1433 */
1434 cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1435 nr_node_ids * sizeof(struct kmem_list3 *);
1436#if DEBUG
1437 cache_cache.obj_size = cache_cache.buffer_size;
1438#endif
Andrew Mortona737b3e2006-03-22 00:08:11 -08001439 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1440 cache_line_size());
Eric Dumazet6a2d7a92006-12-13 00:34:27 -08001441 cache_cache.reciprocal_buffer_size =
1442 reciprocal_value(cache_cache.buffer_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
Jack Steiner07ed76b2006-03-07 21:55:46 -08001444 for (order = 0; order < MAX_ORDER; order++) {
1445 cache_estimate(order, cache_cache.buffer_size,
1446 cache_line_size(), 0, &left_over, &cache_cache.num);
1447 if (cache_cache.num)
1448 break;
1449 }
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02001450 BUG_ON(!cache_cache.num);
Jack Steiner07ed76b2006-03-07 21:55:46 -08001451 cache_cache.gfporder = order;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001452 cache_cache.colour = left_over / cache_cache.colour_off;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001453 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1454 sizeof(struct slab), cache_line_size());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
1456 /* 2+3) create the kmalloc caches */
1457 sizes = malloc_sizes;
1458 names = cache_names;
1459
Andrew Mortona737b3e2006-03-22 00:08:11 -08001460 /*
1461 * Initialize the caches that provide memory for the array cache and the
1462 * kmem_list3 structures first. Without this, further allocations will
1463 * bug.
Christoph Lametere498be72005-09-09 13:03:32 -07001464 */
1465
1466 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001467 sizes[INDEX_AC].cs_size,
1468 ARCH_KMALLOC_MINALIGN,
1469 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001470 NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07001471
Andrew Mortona737b3e2006-03-22 00:08:11 -08001472 if (INDEX_AC != INDEX_L3) {
Christoph Lametere498be72005-09-09 13:03:32 -07001473 sizes[INDEX_L3].cs_cachep =
Andrew Mortona737b3e2006-03-22 00:08:11 -08001474 kmem_cache_create(names[INDEX_L3].name,
1475 sizes[INDEX_L3].cs_size,
1476 ARCH_KMALLOC_MINALIGN,
1477 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001478 NULL);
Andrew Mortona737b3e2006-03-22 00:08:11 -08001479 }
Christoph Lametere498be72005-09-09 13:03:32 -07001480
Ingo Molnare0a42722006-06-23 02:03:46 -07001481 slab_early_init = 0;
1482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 while (sizes->cs_size != ULONG_MAX) {
Christoph Lametere498be72005-09-09 13:03:32 -07001484 /*
1485 * For performance, all the general caches are L1 aligned.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 * This should be particularly beneficial on SMP boxes, as it
1487 * eliminates "false sharing".
1488 * Note: for systems short on memory, removing the alignment will
Christoph Lametere498be72005-09-09 13:03:32 -07001489 * allow tighter packing of the smaller caches.
1490 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001491 if (!sizes->cs_cachep) {
Christoph Lametere498be72005-09-09 13:03:32 -07001492 sizes->cs_cachep = kmem_cache_create(names->name,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001493 sizes->cs_size,
1494 ARCH_KMALLOC_MINALIGN,
1495 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001496 NULL);
Andrew Mortona737b3e2006-03-22 00:08:11 -08001497 }
Christoph Lameter4b51d662007-02-10 01:43:10 -08001498#ifdef CONFIG_ZONE_DMA
1499 sizes->cs_dmacachep = kmem_cache_create(
1500 names->name_dma,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001501 sizes->cs_size,
1502 ARCH_KMALLOC_MINALIGN,
1503 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1504 SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001505 NULL);
Christoph Lameter4b51d662007-02-10 01:43:10 -08001506#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 sizes++;
1508 names++;
1509 }
1510 /* 4) Replace the bootstrap head arrays */
1511 {
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001512 struct array_cache *ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001513
Pekka Enberg83b519e2009-06-10 19:40:04 +03001514 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
Christoph Lametere498be72005-09-09 13:03:32 -07001515
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001516 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1517 memcpy(ptr, cpu_cache_get(&cache_cache),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001518 sizeof(struct arraycache_init));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001519 /*
1520 * Do not assume that spinlocks can be initialized via memcpy:
1521 */
1522 spin_lock_init(&ptr->lock);
1523
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 cache_cache.array[smp_processor_id()] = ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001525
Pekka Enberg83b519e2009-06-10 19:40:04 +03001526 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
Christoph Lametere498be72005-09-09 13:03:32 -07001527
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001528 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001529 != &initarray_generic.cache);
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001530 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001531 sizeof(struct arraycache_init));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001532 /*
1533 * Do not assume that spinlocks can be initialized via memcpy:
1534 */
1535 spin_lock_init(&ptr->lock);
1536
Christoph Lametere498be72005-09-09 13:03:32 -07001537 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001538 ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 }
Christoph Lametere498be72005-09-09 13:03:32 -07001540 /* 5) Replace the bootstrap kmem_list3's */
1541 {
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001542 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
Mel Gorman9c09a952008-01-24 05:49:54 -08001544 for_each_online_node(nid) {
Daniel Yeisleyec1f5ee2008-03-25 23:59:08 +02001545 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
Pekka Enberg556a1692008-01-25 08:20:51 +02001546
Christoph Lametere498be72005-09-09 13:03:32 -07001547 init_list(malloc_sizes[INDEX_AC].cs_cachep,
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001548 &initkmem_list3[SIZE_AC + nid], nid);
Christoph Lametere498be72005-09-09 13:03:32 -07001549
1550 if (INDEX_AC != INDEX_L3) {
1551 init_list(malloc_sizes[INDEX_L3].cs_cachep,
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001552 &initkmem_list3[SIZE_L3 + nid], nid);
Christoph Lametere498be72005-09-09 13:03:32 -07001553 }
1554 }
1555 }
1556
Pekka Enberg8429db52009-06-12 15:58:59 +03001557 g_cpucache_up = EARLY;
Pekka Enberg8429db52009-06-12 15:58:59 +03001558}
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001559
Pekka Enberg8429db52009-06-12 15:58:59 +03001560void __init kmem_cache_init_late(void)
1561{
1562 struct kmem_cache *cachep;
1563
Pekka Enberg8429db52009-06-12 15:58:59 +03001564 /* 6) resize the head arrays to their final sizes */
1565 mutex_lock(&cache_chain_mutex);
1566 list_for_each_entry(cachep, &cache_chain, next)
1567 if (enable_cpucache(cachep, GFP_NOWAIT))
1568 BUG();
1569 mutex_unlock(&cache_chain_mutex);
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 /* Done! */
1572 g_cpucache_up = FULL;
1573
Pekka Enbergec5a36f2009-06-29 09:57:10 +03001574 /* Annotate slab for lockdep -- annotate the malloc caches */
1575 init_lock_keys();
1576
Andrew Mortona737b3e2006-03-22 00:08:11 -08001577 /*
1578 * Register a cpu startup notifier callback that initializes
1579 * cpu_cache_get for all new cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 */
1581 register_cpu_notifier(&cpucache_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
Andrew Mortona737b3e2006-03-22 00:08:11 -08001583 /*
1584 * The reap timers are started later, with a module init call: That part
1585 * of the kernel is not yet operational.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 */
1587}
1588
1589static int __init cpucache_init(void)
1590{
1591 int cpu;
1592
Andrew Mortona737b3e2006-03-22 00:08:11 -08001593 /*
1594 * Register the timers that return unneeded pages to the page allocator
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 */
Christoph Lametere498be72005-09-09 13:03:32 -07001596 for_each_online_cpu(cpu)
Andrew Mortona737b3e2006-03-22 00:08:11 -08001597 start_cpu_timer(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 return 0;
1599}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600__initcall(cpucache_init);
1601
1602/*
1603 * Interface to system's page allocator. No need to hold the cache-lock.
1604 *
1605 * If we requested dmaable memory, we will get it. Even if we
1606 * did not request dmaable memory, we might get it, but that
1607 * would be relatively rare and ignorable.
1608 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001609static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610{
1611 struct page *page;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001612 int nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 int i;
1614
Luke Yangd6fef9d2006-04-10 22:52:56 -07001615#ifndef CONFIG_MMU
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001616 /*
1617 * Nommu uses slabs for process anonymous memory allocations, and thus
1618 * requires __GFP_COMP to properly refcount higher-order allocations.
Luke Yangd6fef9d2006-04-10 22:52:56 -07001619 */
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001620 flags |= __GFP_COMP;
Luke Yangd6fef9d2006-04-10 22:52:56 -07001621#endif
Christoph Lameter765c4502006-09-27 01:50:08 -07001622
Christoph Lameter3c517a62006-12-06 20:33:29 -08001623 flags |= cachep->gfpflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001624 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1625 flags |= __GFP_RECLAIMABLE;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001626
Linus Torvalds517d0862009-06-16 19:50:13 -07001627 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 if (!page)
1629 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001631 nr_pages = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
Christoph Lameter972d1a72006-09-25 23:31:51 -07001633 add_zone_page_state(page_zone(page),
1634 NR_SLAB_RECLAIMABLE, nr_pages);
1635 else
1636 add_zone_page_state(page_zone(page),
1637 NR_SLAB_UNRECLAIMABLE, nr_pages);
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001638 for (i = 0; i < nr_pages; i++)
1639 __SetPageSlab(page + i);
Pekka Enbergc175eea2008-05-09 20:35:53 +02001640
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001641 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1642 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1643
1644 if (cachep->ctor)
1645 kmemcheck_mark_uninitialized_pages(page, nr_pages);
1646 else
1647 kmemcheck_mark_unallocated_pages(page, nr_pages);
1648 }
Pekka Enbergc175eea2008-05-09 20:35:53 +02001649
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001650 return page_address(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651}
1652
1653/*
1654 * Interface to system's page release.
1655 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001656static void kmem_freepages(struct kmem_cache *cachep, void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001658 unsigned long i = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 struct page *page = virt_to_page(addr);
1660 const unsigned long nr_freed = i;
1661
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001662 kmemcheck_free_shadow(page, cachep->gfporder);
Pekka Enbergc175eea2008-05-09 20:35:53 +02001663
Christoph Lameter972d1a72006-09-25 23:31:51 -07001664 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1665 sub_zone_page_state(page_zone(page),
1666 NR_SLAB_RECLAIMABLE, nr_freed);
1667 else
1668 sub_zone_page_state(page_zone(page),
1669 NR_SLAB_UNRECLAIMABLE, nr_freed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 while (i--) {
Nick Pigginf205b2f2006-03-22 00:08:02 -08001671 BUG_ON(!PageSlab(page));
1672 __ClearPageSlab(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 page++;
1674 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 if (current->reclaim_state)
1676 current->reclaim_state->reclaimed_slab += nr_freed;
1677 free_pages((unsigned long)addr, cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678}
1679
1680static void kmem_rcu_free(struct rcu_head *head)
1681{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001682 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
Pekka Enberg343e0d72006-02-01 03:05:50 -08001683 struct kmem_cache *cachep = slab_rcu->cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
1685 kmem_freepages(cachep, slab_rcu->addr);
1686 if (OFF_SLAB(cachep))
1687 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1688}
1689
1690#if DEBUG
1691
1692#ifdef CONFIG_DEBUG_PAGEALLOC
Pekka Enberg343e0d72006-02-01 03:05:50 -08001693static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001694 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001696 int size = obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001698 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001700 if (size < 5 * sizeof(unsigned long))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 return;
1702
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001703 *addr++ = 0x12345678;
1704 *addr++ = caller;
1705 *addr++ = smp_processor_id();
1706 size -= 3 * sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 {
1708 unsigned long *sptr = &caller;
1709 unsigned long svalue;
1710
1711 while (!kstack_end(sptr)) {
1712 svalue = *sptr++;
1713 if (kernel_text_address(svalue)) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001714 *addr++ = svalue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 size -= sizeof(unsigned long);
1716 if (size <= sizeof(unsigned long))
1717 break;
1718 }
1719 }
1720
1721 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001722 *addr++ = 0x87654321;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723}
1724#endif
1725
Pekka Enberg343e0d72006-02-01 03:05:50 -08001726static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001728 int size = obj_size(cachep);
1729 addr = &((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
1731 memset(addr, val, size);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001732 *(unsigned char *)(addr + size - 1) = POISON_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733}
1734
1735static void dump_line(char *data, int offset, int limit)
1736{
1737 int i;
Dave Jonesaa83aa42006-09-29 01:59:51 -07001738 unsigned char error = 0;
1739 int bad_count = 0;
1740
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 printk(KERN_ERR "%03x:", offset);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001742 for (i = 0; i < limit; i++) {
1743 if (data[offset + i] != POISON_FREE) {
1744 error = data[offset + i];
1745 bad_count++;
1746 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001747 printk(" %02x", (unsigned char)data[offset + i]);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001748 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 printk("\n");
Dave Jonesaa83aa42006-09-29 01:59:51 -07001750
1751 if (bad_count == 1) {
1752 error ^= POISON_FREE;
1753 if (!(error & (error - 1))) {
1754 printk(KERN_ERR "Single bit error detected. Probably "
1755 "bad RAM.\n");
1756#ifdef CONFIG_X86
1757 printk(KERN_ERR "Run memtest86+ or a similar memory "
1758 "test tool.\n");
1759#else
1760 printk(KERN_ERR "Run a memory test tool.\n");
1761#endif
1762 }
1763 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764}
1765#endif
1766
1767#if DEBUG
1768
Pekka Enberg343e0d72006-02-01 03:05:50 -08001769static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770{
1771 int i, size;
1772 char *realobj;
1773
1774 if (cachep->flags & SLAB_RED_ZONE) {
David Woodhouseb46b8f12007-05-08 00:22:59 -07001775 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001776 *dbg_redzone1(cachep, objp),
1777 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 }
1779
1780 if (cachep->flags & SLAB_STORE_USER) {
1781 printk(KERN_ERR "Last user: [<%p>]",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001782 *dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 print_symbol("(%s)",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001784 (unsigned long)*dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 printk("\n");
1786 }
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001787 realobj = (char *)objp + obj_offset(cachep);
1788 size = obj_size(cachep);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001789 for (i = 0; i < size && lines; i += 16, lines--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 int limit;
1791 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001792 if (i + limit > size)
1793 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 dump_line(realobj, i, limit);
1795 }
1796}
1797
Pekka Enberg343e0d72006-02-01 03:05:50 -08001798static void check_poison_obj(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799{
1800 char *realobj;
1801 int size, i;
1802 int lines = 0;
1803
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001804 realobj = (char *)objp + obj_offset(cachep);
1805 size = obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001807 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 char exp = POISON_FREE;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001809 if (i == size - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 exp = POISON_END;
1811 if (realobj[i] != exp) {
1812 int limit;
1813 /* Mismatch ! */
1814 /* Print header */
1815 if (lines == 0) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001816 printk(KERN_ERR
David Howellse94a40c2007-04-02 23:46:28 +01001817 "Slab corruption: %s start=%p, len=%d\n",
1818 cachep->name, realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 print_objinfo(cachep, objp, 0);
1820 }
1821 /* Hexdump the affected line */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001822 i = (i / 16) * 16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001824 if (i + limit > size)
1825 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 dump_line(realobj, i, limit);
1827 i += 16;
1828 lines++;
1829 /* Limit to 5 lines */
1830 if (lines > 5)
1831 break;
1832 }
1833 }
1834 if (lines != 0) {
1835 /* Print some data about the neighboring objects, if they
1836 * exist:
1837 */
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08001838 struct slab *slabp = virt_to_slab(objp);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001839 unsigned int objnr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001841 objnr = obj_to_index(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 if (objnr) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001843 objp = index_to_obj(cachep, slabp, objnr - 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001844 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001846 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 print_objinfo(cachep, objp, 2);
1848 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001849 if (objnr + 1 < cachep->num) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001850 objp = index_to_obj(cachep, slabp, objnr + 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001851 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001853 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 print_objinfo(cachep, objp, 2);
1855 }
1856 }
1857}
1858#endif
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860#if DEBUG
Rabin Vincente79aec22008-07-04 00:40:32 +05301861static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001862{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 int i;
1864 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001865 void *objp = index_to_obj(cachep, slabp, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866
1867 if (cachep->flags & SLAB_POISON) {
1868#ifdef CONFIG_DEBUG_PAGEALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -08001869 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1870 OFF_SLAB(cachep))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001871 kernel_map_pages(virt_to_page(objp),
Andrew Mortona737b3e2006-03-22 00:08:11 -08001872 cachep->buffer_size / PAGE_SIZE, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 else
1874 check_poison_obj(cachep, objp);
1875#else
1876 check_poison_obj(cachep, objp);
1877#endif
1878 }
1879 if (cachep->flags & SLAB_RED_ZONE) {
1880 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1881 slab_error(cachep, "start of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001882 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1884 slab_error(cachep, "end of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001885 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 }
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001888}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889#else
Rabin Vincente79aec22008-07-04 00:40:32 +05301890static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001891{
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001892}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893#endif
1894
Randy Dunlap911851e2006-03-22 00:08:14 -08001895/**
1896 * slab_destroy - destroy and release all objects in a slab
1897 * @cachep: cache pointer being destroyed
1898 * @slabp: slab pointer being destroyed
1899 *
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001900 * Destroy all the objs in a slab, and release the mem back to the system.
Andrew Mortona737b3e2006-03-22 00:08:11 -08001901 * Before calling, the slab must have been unlinked from the cache. The
1902 * cache-lock is not held/needed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001903 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001904static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001905{
1906 void *addr = slabp->s_mem - slabp->colouroff;
1907
Rabin Vincente79aec22008-07-04 00:40:32 +05301908 slab_destroy_debugcheck(cachep, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1910 struct slab_rcu *slab_rcu;
1911
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001912 slab_rcu = (struct slab_rcu *)slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 slab_rcu->cachep = cachep;
1914 slab_rcu->addr = addr;
1915 call_rcu(&slab_rcu->head, kmem_rcu_free);
1916 } else {
1917 kmem_freepages(cachep, addr);
Ingo Molnar873623d2006-07-13 14:44:38 +02001918 if (OFF_SLAB(cachep))
1919 kmem_cache_free(cachep->slabp_cache, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 }
1921}
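/*
 * Editor's sketch, not from the source: the call_rcu() path above is the
 * generic deferred-free pattern.  Any structure with an embedded
 * struct rcu_head can be handled the same way once pre-existing RCU readers
 * are allowed to finish; struct foo, foo_rcu_free() and foo_release() are
 * invented names used only for illustration.
 */
#if 0
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_rcu_free(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

static void foo_release(struct foo *f)
{
	/* readers that looked the object up earlier keep seeing valid memory */
	call_rcu(&f->rcu, foo_rcu_free);
}
#endif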
1922
Christoph Lameter117f6eb2006-09-25 23:31:37 -07001923static void __kmem_cache_destroy(struct kmem_cache *cachep)
1924{
1925 int i;
1926 struct kmem_list3 *l3;
1927
1928 for_each_online_cpu(i)
1929 kfree(cachep->array[i]);
1930
1931 /* NUMA: free the list3 structures */
1932 for_each_online_node(i) {
1933 l3 = cachep->nodelists[i];
1934 if (l3) {
1935 kfree(l3->shared);
1936 free_alien_cache(l3->alien);
1937 kfree(l3);
1938 }
1939 }
1940 kmem_cache_free(&cache_cache, cachep);
1941}
1942
1943
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944/**
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001945 * calculate_slab_order - calculate size (page order) of slabs
1946 * @cachep: pointer to the cache that is being created
1947 * @size: size of objects to be created in this cache.
1948 * @align: required alignment for the objects.
1949 * @flags: slab allocation flags
1950 *
1951 * Also calculates the number of objects per slab.
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001952 *
1953 * This could be made much more intelligent. For now, try to avoid using
1954 * high order pages for slabs. When the gfp() functions are more friendly
1955 * towards high-order requests, this should be changed.
1956 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001957static size_t calculate_slab_order(struct kmem_cache *cachep,
Randy Dunlapee13d782006-02-01 03:05:53 -08001958 size_t size, size_t align, unsigned long flags)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001959{
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001960 unsigned long offslab_limit;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001961 size_t left_over = 0;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001962 int gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001963
Christoph Lameter0aa817f2007-05-16 22:11:01 -07001964 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001965 unsigned int num;
1966 size_t remainder;
1967
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001968 cache_estimate(gfporder, size, align, flags, &remainder, &num);
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001969 if (!num)
1970 continue;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001971
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001972 if (flags & CFLGS_OFF_SLAB) {
1973 /*
1974 * Max number of objs-per-slab for caches which
1975 * use off-slab slabs. Needed to avoid a possible
1976 * looping condition in cache_grow().
1977 */
1978 offslab_limit = size - sizeof(struct slab);
1979 offslab_limit /= sizeof(kmem_bufctl_t);
1980
1981 if (num > offslab_limit)
1982 break;
1983 }
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001984
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001985 /* Found something acceptable - save it away */
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001986 cachep->num = num;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001987 cachep->gfporder = gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001988 left_over = remainder;
1989
1990 /*
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08001991 * A VFS-reclaimable slab tends to have most allocations
1992 * as GFP_NOFS and we really don't want to have to be allocating
1993 * higher-order pages when we are unable to shrink dcache.
1994 */
1995 if (flags & SLAB_RECLAIM_ACCOUNT)
1996 break;
1997
1998 /*
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001999 * Large number of objects is good, but very large slabs are
2000 * currently bad for the gfp()s.
2001 */
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002002 if (gfporder >= slab_break_gfp_order)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002003 break;
2004
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002005 /*
2006 * Acceptable internal fragmentation?
2007 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002008 if (left_over * 8 <= (PAGE_SIZE << gfporder))
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002009 break;
2010 }
2011 return left_over;
2012}
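/*
 * Editor's worked example (the object size and waste figures are invented;
 * only the rules above come from the code): for an aligned object of about
 * 1500 bytes on a 4K-page machine, gfporder 0 fits 2 objects with roughly
 * 1000 bytes wasted; 1000 * 8 > 4096, so the fragmentation check does not
 * stop the search.  At gfporder 1 the 8K slab holds 5 objects with a few
 * hundred bytes left over, and either that waste is now within 1/8 of the
 * slab or gfporder has reached slab_break_gfp_order (1 on machines with
 * more than 32MB), so the cache settles on order 1.
 */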
2013
Pekka Enberg83b519e2009-06-10 19:40:04 +03002014static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002015{
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002016 if (g_cpucache_up == FULL)
Pekka Enberg83b519e2009-06-10 19:40:04 +03002017 return enable_cpucache(cachep, gfp);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002018
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002019 if (g_cpucache_up == NONE) {
2020 /*
2021 * Note: the first kmem_cache_create must create the cache
2022 * that's used by kmalloc(24), otherwise the creation of
2023 * further caches will BUG().
2024 */
2025 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2026
2027 /*
2028 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2029 * the first cache, then we need to set up all its list3s,
2030 * otherwise the creation of further caches will BUG().
2031 */
2032 set_up_list3s(cachep, SIZE_AC);
2033 if (INDEX_AC == INDEX_L3)
2034 g_cpucache_up = PARTIAL_L3;
2035 else
2036 g_cpucache_up = PARTIAL_AC;
2037 } else {
2038 cachep->array[smp_processor_id()] =
Pekka Enberg83b519e2009-06-10 19:40:04 +03002039 kmalloc(sizeof(struct arraycache_init), gfp);
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002040
2041 if (g_cpucache_up == PARTIAL_AC) {
2042 set_up_list3s(cachep, SIZE_L3);
2043 g_cpucache_up = PARTIAL_L3;
2044 } else {
2045 int node;
Pekka Enberg556a1692008-01-25 08:20:51 +02002046 for_each_online_node(node) {
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002047 cachep->nodelists[node] =
2048 kmalloc_node(sizeof(struct kmem_list3),
Pekka Enbergeb91f1d2009-06-12 14:56:09 +03002049 gfp, node);
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002050 BUG_ON(!cachep->nodelists[node]);
2051 kmem_list3_init(cachep->nodelists[node]);
2052 }
2053 }
2054 }
2055 cachep->nodelists[numa_node_id()]->next_reap =
2056 jiffies + REAPTIMEOUT_LIST3 +
2057 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2058
2059 cpu_cache_get(cachep)->avail = 0;
2060 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2061 cpu_cache_get(cachep)->batchcount = 1;
2062 cpu_cache_get(cachep)->touched = 0;
2063 cachep->batchcount = 1;
2064 cachep->limit = BOOT_CPUCACHE_ENTRIES;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002065 return 0;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002066}
2067
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002068/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 * kmem_cache_create - Create a cache.
2070 * @name: A string which is used in /proc/slabinfo to identify this cache.
2071 * @size: The size of objects to be created in this cache.
2072 * @align: The required alignment for the objects.
2073 * @flags: SLAB flags
2074 * @ctor: A constructor for the objects.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 *
2076 * Returns a ptr to the cache on success, NULL on failure.
2077 * Cannot be called within an interrupt, but can be interrupted.
Paul Mundt20c2df82007-07-20 10:11:58 +09002078 * The @ctor is run when new pages are allocated by the cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 *
2080 * @name must be valid until the cache is destroyed. This implies that
Andrew Mortona737b3e2006-03-22 00:08:11 -08002081 * the module calling this has to destroy the cache before getting unloaded.
Catalin Marinas249da162008-11-21 12:56:22 +00002082 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
2083 * therefore applications must manage it themselves.
Andrew Mortona737b3e2006-03-22 00:08:11 -08002084 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 * The flags are
2086 *
2087 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2088 * to catch references to uninitialised memory.
2089 *
2090 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2091 * for buffer overruns.
2092 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2094 * cacheline. This can be beneficial if you're counting cycles as closely
2095 * as davem.
2096 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002097struct kmem_cache *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098kmem_cache_create (const char *name, size_t size, size_t align,
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002099 unsigned long flags, void (*ctor)(void *))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100{
2101 size_t left_over, slab_size, ralign;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07002102 struct kmem_cache *cachep = NULL, *pc;
Pekka Enberg83b519e2009-06-10 19:40:04 +03002103 gfp_t gfp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
2105 /*
2106 * Sanity checks... these are all serious usage bugs.
2107 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002108 if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
Paul Mundt20c2df82007-07-20 10:11:58 +09002109 size > KMALLOC_MAX_SIZE) {
Harvey Harrisond40cee22008-04-30 00:55:07 -07002110 printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
Andrew Mortona737b3e2006-03-22 00:08:11 -08002111 name);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002112 BUG();
2113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Ravikiran G Thirumalaif0188f42006-02-10 01:51:13 -08002115 /*
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002116 * We use cache_chain_mutex to ensure a consistent view of
Rusty Russell174596a2009-01-01 10:12:29 +10302117 * cpu_online_mask as well. Please see cpuup_callback
Ravikiran G Thirumalaif0188f42006-02-10 01:51:13 -08002118 */
Pekka Enberg83b519e2009-06-10 19:40:04 +03002119 if (slab_is_available()) {
2120 get_online_cpus();
2121 mutex_lock(&cache_chain_mutex);
2122 }
Andrew Morton4f12bb42005-11-07 00:58:00 -08002123
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07002124 list_for_each_entry(pc, &cache_chain, next) {
Andrew Morton4f12bb42005-11-07 00:58:00 -08002125 char tmp;
2126 int res;
2127
2128 /*
2129 * This happens when the module gets unloaded and doesn't
2130 * destroy its slab cache and no-one else reuses the vmalloc
2131 * area of the module. Print a warning.
2132 */
Andrew Morton138ae662006-12-06 20:36:41 -08002133 res = probe_kernel_address(pc->name, tmp);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002134 if (res) {
matzeb4169522007-05-06 14:49:52 -07002135 printk(KERN_ERR
2136 "SLAB: cache with size %d has lost its name\n",
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002137 pc->buffer_size);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002138 continue;
2139 }
2140
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002141 if (!strcmp(pc->name, name)) {
matzeb4169522007-05-06 14:49:52 -07002142 printk(KERN_ERR
2143 "kmem_cache_create: duplicate cache %s\n", name);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002144 dump_stack();
2145 goto oops;
2146 }
2147 }
2148
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149#if DEBUG
2150 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151#if FORCED_DEBUG
2152 /*
2153 * Enable redzoning and last user accounting, except for caches with
2154 * large objects, if the increased size would increase the object size
2155 * above the next power of two: caches with object sizes just above a
2156 * power of two have a significant amount of internal fragmentation.
2157 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002158 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2159 2 * sizeof(unsigned long long)))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002160 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 if (!(flags & SLAB_DESTROY_BY_RCU))
2162 flags |= SLAB_POISON;
2163#endif
2164 if (flags & SLAB_DESTROY_BY_RCU)
2165 BUG_ON(flags & SLAB_POISON);
2166#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002168 * Always check the flags; a caller might be expecting debug support which
2169 * isn't available.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 */
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002171 BUG_ON(flags & ~CREATE_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172
Andrew Mortona737b3e2006-03-22 00:08:11 -08002173 /*
2174 * Check that size is in terms of words. This is needed to avoid
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 * unaligned accesses for some archs when redzoning is used, and makes
2176 * sure any on-slab bufctls are also correctly aligned.
2177 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002178 if (size & (BYTES_PER_WORD - 1)) {
2179 size += (BYTES_PER_WORD - 1);
2180 size &= ~(BYTES_PER_WORD - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 }
2182
Andrew Mortona737b3e2006-03-22 00:08:11 -08002183 /* calculate the final buffer alignment: */
2184
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 /* 1) arch recommendation: can be overridden for debug */
2186 if (flags & SLAB_HWCACHE_ALIGN) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002187 /*
2188 * Default alignment: as specified by the arch code. Except if
2189 * an object is really small, then squeeze multiple objects into
2190 * one cacheline.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 */
2192 ralign = cache_line_size();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002193 while (size <= ralign / 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 ralign /= 2;
2195 } else {
2196 ralign = BYTES_PER_WORD;
2197 }
Pekka Enbergca5f9702006-09-25 23:31:25 -07002198
2199 /*
David Woodhouse87a927c2007-07-04 21:26:44 -04002200 * Redzoning and user store require word alignment or possibly larger.
2201 * Note this will be overridden by architecture or caller mandated
2202 * alignment if either is greater than BYTES_PER_WORD.
Pekka Enbergca5f9702006-09-25 23:31:25 -07002203 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002204 if (flags & SLAB_STORE_USER)
2205 ralign = BYTES_PER_WORD;
2206
2207 if (flags & SLAB_RED_ZONE) {
2208 ralign = REDZONE_ALIGN;
2209 /* If redzoning, ensure that the second redzone is suitably
2210 * aligned, by adjusting the object size accordingly. */
2211 size += REDZONE_ALIGN - 1;
2212 size &= ~(REDZONE_ALIGN - 1);
2213 }
Pekka Enbergca5f9702006-09-25 23:31:25 -07002214
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002215 /* 2) arch mandated alignment */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 if (ralign < ARCH_SLAB_MINALIGN) {
2217 ralign = ARCH_SLAB_MINALIGN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 }
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002219 /* 3) caller mandated alignment */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 if (ralign < align) {
2221 ralign = align;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 }
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002223 /* disable debug if necessary */
David Woodhouseb46b8f12007-05-08 00:22:59 -07002224 if (ralign > __alignof__(unsigned long long))
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002225 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002226 /*
Pekka Enbergca5f9702006-09-25 23:31:25 -07002227 * 4) Store it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 */
2229 align = ralign;
2230
Pekka Enberg83b519e2009-06-10 19:40:04 +03002231 if (slab_is_available())
2232 gfp = GFP_KERNEL;
2233 else
2234 gfp = GFP_NOWAIT;
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 /* Get cache's description obj. */
Pekka Enberg83b519e2009-06-10 19:40:04 +03002237 cachep = kmem_cache_zalloc(&cache_cache, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 if (!cachep)
Andrew Morton4f12bb42005-11-07 00:58:00 -08002239 goto oops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
2241#if DEBUG
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002242 cachep->obj_size = size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
Pekka Enbergca5f9702006-09-25 23:31:25 -07002244 /*
2245 * Both debugging options require word-alignment which is calculated
2246 * into align above.
2247 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 if (flags & SLAB_RED_ZONE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 /* add space for red zone words */
David Woodhouseb46b8f12007-05-08 00:22:59 -07002250 cachep->obj_offset += sizeof(unsigned long long);
2251 size += 2 * sizeof(unsigned long long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 }
2253 if (flags & SLAB_STORE_USER) {
Pekka Enbergca5f9702006-09-25 23:31:25 -07002254 /* user store requires one word storage behind the end of
David Woodhouse87a927c2007-07-04 21:26:44 -04002255 * the real object. But if the second red zone needs to be
2256 * aligned to 64 bits, we must allow that much space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002258 if (flags & SLAB_RED_ZONE)
2259 size += REDZONE_ALIGN;
2260 else
2261 size += BYTES_PER_WORD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 }
2263#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002264 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002265 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2266 cachep->obj_offset += PAGE_SIZE - size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 size = PAGE_SIZE;
2268 }
2269#endif
2270#endif
2271
Ingo Molnare0a42722006-06-23 02:03:46 -07002272 /*
2273 * Determine if the slab management is 'on' or 'off' slab.
2274 * (bootstrapping cannot cope with offslab caches so don't do
Catalin Marinase7cb55b2009-10-28 13:33:08 +00002275 * it too early on. Always use on-slab management when
2276 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak.)
Ingo Molnare0a42722006-06-23 02:03:46 -07002277 */
Catalin Marinase7cb55b2009-10-28 13:33:08 +00002278 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2279 !(flags & SLAB_NOLEAKTRACE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 /*
2281 * Size is large, assume best to place the slab management obj
2282 * off-slab (should allow better packing of objs).
2283 */
2284 flags |= CFLGS_OFF_SLAB;
2285
2286 size = ALIGN(size, align);
2287
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08002288 left_over = calculate_slab_order(cachep, size, align, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
2290 if (!cachep->num) {
matzeb4169522007-05-06 14:49:52 -07002291 printk(KERN_ERR
2292 "kmem_cache_create: couldn't create cache %s.\n", name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 kmem_cache_free(&cache_cache, cachep);
2294 cachep = NULL;
Andrew Morton4f12bb42005-11-07 00:58:00 -08002295 goto oops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002297 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2298 + sizeof(struct slab), align);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
2300 /*
2301 * If the slab has been placed off-slab, and we have enough space then
2302 * move it on-slab. This is at the expense of any extra colouring.
2303 */
2304 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2305 flags &= ~CFLGS_OFF_SLAB;
2306 left_over -= slab_size;
2307 }
2308
2309 if (flags & CFLGS_OFF_SLAB) {
2310 /* really off slab. No need for manual alignment */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002311 slab_size =
2312 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
Ron Lee67461362009-05-22 04:58:22 +09302313
2314#ifdef CONFIG_PAGE_POISONING
2315 /* If we're going to use the generic kernel_map_pages()
2316 * poisoning, then it's going to smash the contents of
2317 * the redzone and userword anyhow, so switch them off.
2318 */
2319 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2320 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2321#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 }
2323
2324 cachep->colour_off = cache_line_size();
2325 /* Offset must be a multiple of the alignment. */
2326 if (cachep->colour_off < align)
2327 cachep->colour_off = align;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002328 cachep->colour = left_over / cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 cachep->slab_size = slab_size;
2330 cachep->flags = flags;
2331 cachep->gfpflags = 0;
Christoph Lameter4b51d662007-02-10 01:43:10 -08002332 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 cachep->gfpflags |= GFP_DMA;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002334 cachep->buffer_size = size;
Eric Dumazet6a2d7a92006-12-13 00:34:27 -08002335 cachep->reciprocal_buffer_size = reciprocal_value(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002337 if (flags & CFLGS_OFF_SLAB) {
Victor Fuscob2d55072005-09-10 00:26:36 -07002338 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002339 /*
2340 * This is a possibility for one of the malloc_sizes caches.
2341 * But since we go off slab only for object size greater than
2342 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2343 * this should not happen at all.
2344 * But leave a BUG_ON for some lucky dude.
2345 */
Christoph Lameter6cb8f912007-07-17 04:03:22 -07002346 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002347 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 cachep->ctor = ctor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 cachep->name = name;
2350
Pekka Enberg83b519e2009-06-10 19:40:04 +03002351 if (setup_cpu_cache(cachep, gfp)) {
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002352 __kmem_cache_destroy(cachep);
2353 cachep = NULL;
2354 goto oops;
2355 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 /* cache setup completed, link it into the list */
2358 list_add(&cachep->next, &cache_chain);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002359oops:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 if (!cachep && (flags & SLAB_PANIC))
2361 panic("kmem_cache_create(): failed to create slab `%s'\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002362 name);
Pekka Enberg83b519e2009-06-10 19:40:04 +03002363 if (slab_is_available()) {
2364 mutex_unlock(&cache_chain_mutex);
2365 put_online_cpus();
2366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 return cachep;
2368}
2369EXPORT_SYMBOL(kmem_cache_create);
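/*
 * Editor's sketch of typical use of the interface documented above; it is
 * not part of the kernel build.  struct foo, foo_ctor, foo_cache and the
 * init/exit functions are invented names, and error handling is reduced to
 * the bare minimum.
 */
#if 0
struct foo {
	spinlock_t lock;
	struct list_head list;
};

static struct kmem_cache *foo_cache;

/* runs once per object when a fresh slab page is populated */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	spin_lock_init(&f->lock);
	INIT_LIST_HEAD(&f->list);
}

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, foo_ctor);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	/* every object must already have gone back via kmem_cache_free() */
	kmem_cache_destroy(foo_cache);
}

/* allocation and release of individual objects */
static struct foo *foo_get(void)
{
	return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}

static void foo_put(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}
#endif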
2370
2371#if DEBUG
2372static void check_irq_off(void)
2373{
2374 BUG_ON(!irqs_disabled());
2375}
2376
2377static void check_irq_on(void)
2378{
2379 BUG_ON(irqs_disabled());
2380}
2381
Pekka Enberg343e0d72006-02-01 03:05:50 -08002382static void check_spinlock_acquired(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383{
2384#ifdef CONFIG_SMP
2385 check_irq_off();
Christoph Lametere498be72005-09-09 13:03:32 -07002386 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387#endif
2388}
Christoph Lametere498be72005-09-09 13:03:32 -07002389
Pekka Enberg343e0d72006-02-01 03:05:50 -08002390static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
Christoph Lametere498be72005-09-09 13:03:32 -07002391{
2392#ifdef CONFIG_SMP
2393 check_irq_off();
2394 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2395#endif
2396}
2397
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398#else
2399#define check_irq_off() do { } while(0)
2400#define check_irq_on() do { } while(0)
2401#define check_spinlock_acquired(x) do { } while(0)
Christoph Lametere498be72005-09-09 13:03:32 -07002402#define check_spinlock_acquired_node(x, y) do { } while(0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403#endif
2404
Christoph Lameteraab22072006-03-22 00:09:06 -08002405static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2406 struct array_cache *ac,
2407 int force, int node);
2408
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409static void do_drain(void *arg)
2410{
Andrew Mortona737b3e2006-03-22 00:08:11 -08002411 struct kmem_cache *cachep = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 struct array_cache *ac;
Christoph Lameterff694162005-09-22 21:44:02 -07002413 int node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414
2415 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002416 ac = cpu_cache_get(cachep);
Christoph Lameterff694162005-09-22 21:44:02 -07002417 spin_lock(&cachep->nodelists[node]->list_lock);
2418 free_block(cachep, ac->entry, ac->avail, node);
2419 spin_unlock(&cachep->nodelists[node]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 ac->avail = 0;
2421}
2422
Pekka Enberg343e0d72006-02-01 03:05:50 -08002423static void drain_cpu_caches(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424{
Christoph Lametere498be72005-09-09 13:03:32 -07002425 struct kmem_list3 *l3;
2426 int node;
2427
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002428 on_each_cpu(do_drain, cachep, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 check_irq_on();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002430 for_each_online_node(node) {
Christoph Lametere498be72005-09-09 13:03:32 -07002431 l3 = cachep->nodelists[node];
Roland Dreiera4523a82006-05-15 11:41:00 -07002432 if (l3 && l3->alien)
2433 drain_alien_cache(cachep, l3->alien);
2434 }
2435
2436 for_each_online_node(node) {
2437 l3 = cachep->nodelists[node];
2438 if (l3)
Christoph Lameteraab22072006-03-22 00:09:06 -08002439 drain_array(cachep, l3, l3->shared, 1, node);
Christoph Lametere498be72005-09-09 13:03:32 -07002440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441}
2442
Christoph Lametered11d9e2006-06-30 01:55:45 -07002443/*
2444 * Remove slabs from the list of free slabs.
2445 * Specify the number of slabs to drain in tofree.
2446 *
2447 * Returns the actual number of slabs released.
2448 */
2449static int drain_freelist(struct kmem_cache *cache,
2450 struct kmem_list3 *l3, int tofree)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451{
Christoph Lametered11d9e2006-06-30 01:55:45 -07002452 struct list_head *p;
2453 int nr_freed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 struct slab *slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
Christoph Lametered11d9e2006-06-30 01:55:45 -07002456 nr_freed = 0;
2457 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
Christoph Lametered11d9e2006-06-30 01:55:45 -07002459 spin_lock_irq(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07002460 p = l3->slabs_free.prev;
Christoph Lametered11d9e2006-06-30 01:55:45 -07002461 if (p == &l3->slabs_free) {
2462 spin_unlock_irq(&l3->list_lock);
2463 goto out;
2464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
Christoph Lametered11d9e2006-06-30 01:55:45 -07002466 slabp = list_entry(p, struct slab, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467#if DEBUG
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002468 BUG_ON(slabp->inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469#endif
2470 list_del(&slabp->list);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002471 /*
2472 * Safe to drop the lock. The slab is no longer linked
2473 * to the cache.
2474 */
2475 l3->free_objects -= cache->num;
Christoph Lametere498be72005-09-09 13:03:32 -07002476 spin_unlock_irq(&l3->list_lock);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002477 slab_destroy(cache, slabp);
2478 nr_freed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 }
Christoph Lametered11d9e2006-06-30 01:55:45 -07002480out:
2481 return nr_freed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482}
2483
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002484/* Called with cache_chain_mutex held to protect against cpu hotplug */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002485static int __cache_shrink(struct kmem_cache *cachep)
Christoph Lametere498be72005-09-09 13:03:32 -07002486{
2487 int ret = 0, i = 0;
2488 struct kmem_list3 *l3;
2489
2490 drain_cpu_caches(cachep);
2491
2492 check_irq_on();
2493 for_each_online_node(i) {
2494 l3 = cachep->nodelists[i];
Christoph Lametered11d9e2006-06-30 01:55:45 -07002495 if (!l3)
2496 continue;
2497
2498 drain_freelist(cachep, l3, l3->free_objects);
2499
2500 ret += !list_empty(&l3->slabs_full) ||
2501 !list_empty(&l3->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07002502 }
2503 return (ret ? 1 : 0);
2504}
2505
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506/**
2507 * kmem_cache_shrink - Shrink a cache.
2508 * @cachep: The cache to shrink.
2509 *
2510 * Releases as many slabs as possible for a cache.
2511 * To help debugging, a zero exit status indicates all slabs were released.
2512 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002513int kmem_cache_shrink(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514{
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002515 int ret;
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002516 BUG_ON(!cachep || in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002518 get_online_cpus();
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002519 mutex_lock(&cache_chain_mutex);
2520 ret = __cache_shrink(cachep);
2521 mutex_unlock(&cache_chain_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002522 put_online_cpus();
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002523 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524}
2525EXPORT_SYMBOL(kmem_cache_shrink);
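/*
 * Illustrative sketch (not part of the original file): a subsystem that
 * caches objects aggressively might give unused slabs back to the page
 * allocator under memory pressure.  The cache pointer and the trigger
 * function are hypothetical; only kmem_cache_shrink() is the real API.
 */
#if 0	/* example only, never compiled */
static struct kmem_cache *example_cachep;	/* hypothetical cache */

static void example_trim_cache(void)
{
	/* A zero return means every slab could be released. */
	if (kmem_cache_shrink(example_cachep))
		pr_debug("example: some objects are still in use\n");
}
#endif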
2526
2527/**
2528 * kmem_cache_destroy - delete a cache
2529 * @cachep: the cache to destroy
2530 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08002531 * Remove a &struct kmem_cache object from the slab cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 *
2533 * It is expected this function will be called by a module when it is
2534 * unloaded. This will remove the cache completely, and avoid a duplicate
2535 * cache being allocated each time a module is loaded and unloaded, if the
2536 * module doesn't have persistent in-kernel storage across loads and unloads.
2537 *
2538 * The cache must be empty before calling this function.
2539 *
2540 * The caller must guarantee that noone will allocate memory from the cache
2541 * during the kmem_cache_destroy().
2542 */
Alexey Dobriyan133d2052006-09-27 01:49:41 -07002543void kmem_cache_destroy(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544{
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002545 BUG_ON(!cachep || in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 /* Find the cache in the chain of caches. */
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002548 get_online_cpus();
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002549 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 /*
2551 * the chain is never empty, cache_cache is never destroyed
2552 */
2553 list_del(&cachep->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 if (__cache_shrink(cachep)) {
2555 slab_error(cachep, "Can't free all objects");
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002556 list_add(&cachep->next, &cache_chain);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002557 mutex_unlock(&cache_chain_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002558 put_online_cpus();
Alexey Dobriyan133d2052006-09-27 01:49:41 -07002559 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 }
2561
2562 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
Paul E. McKenney7ed9f7e2009-06-25 12:31:37 -07002563 rcu_barrier();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564
Christoph Lameter117f6eb2006-09-25 23:31:37 -07002565 __kmem_cache_destroy(cachep);
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002566 mutex_unlock(&cache_chain_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002567 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568}
2569EXPORT_SYMBOL(kmem_cache_destroy);
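/*
 * Illustrative sketch (not part of the original file): the typical module
 * pattern described in the comment above -- create the cache on load and
 * destroy it on unload, after every object has been freed.  The object
 * type, cache name and init/exit function names are invented here.
 */
#if 0	/* example only, never compiled */
struct example_obj {			/* hypothetical object type */
	struct list_head list;
	int value;
};

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* All objects must have been kmem_cache_free()d by now. */
	kmem_cache_destroy(example_cachep);
}

module_init(example_init);
module_exit(example_exit);
#endif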
2570
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002571/*
2572 * Get the memory for a slab management obj.
2573 * For a cache whose slab descriptor is kept off-slab, the slab descriptors
2574 * always come from the malloc_sizes caches. The slab descriptor cannot
2575 * come from the same cache that is being created because,
2576 * when we search for an appropriate cache for these
2577 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2578 * If we were creating a malloc_sizes cache here, it would not be visible to
2579 * kmem_find_general_cachep until the initialization is complete.
2580 * Hence slabp_cache cannot be the same as the cache being created.
2581 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002582static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002583 int colour_off, gfp_t local_flags,
2584 int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585{
2586 struct slab *slabp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002587
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 if (OFF_SLAB(cachep)) {
2589 /* Slab management obj is off-slab. */
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002590 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
Pekka Enberg8759ec52008-11-26 10:01:31 +02002591 local_flags, nodeid);
Catalin Marinasd5cff632009-06-11 13:22:40 +01002592 /*
2593 * If the first object in the slab is leaked (it's allocated
2594 * but no one has a reference to it), we want to make sure
2595 * kmemleak does not treat the ->s_mem pointer as a reference
2596 * to the object. Otherwise we will not report the leak.
2597 */
Catalin Marinasc017b4b2009-10-28 13:33:09 +00002598 kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2599 local_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 if (!slabp)
2601 return NULL;
2602 } else {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002603 slabp = objp + colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 colour_off += cachep->slab_size;
2605 }
2606 slabp->inuse = 0;
2607 slabp->colouroff = colour_off;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002608 slabp->s_mem = objp + colour_off;
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002609 slabp->nodeid = nodeid;
Marcin Slusarze51bfd02008-02-10 11:21:54 +01002610 slabp->free = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 return slabp;
2612}
2613
2614static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2615{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002616 return (kmem_bufctl_t *) (slabp + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617}
2618
Pekka Enberg343e0d72006-02-01 03:05:50 -08002619static void cache_init_objs(struct kmem_cache *cachep,
Christoph Lametera35afb82007-05-16 22:10:57 -07002620 struct slab *slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621{
2622 int i;
2623
2624 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002625 void *objp = index_to_obj(cachep, slabp, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626#if DEBUG
2627 /* need to poison the objs? */
2628 if (cachep->flags & SLAB_POISON)
2629 poison_obj(cachep, objp, POISON_FREE);
2630 if (cachep->flags & SLAB_STORE_USER)
2631 *dbg_userword(cachep, objp) = NULL;
2632
2633 if (cachep->flags & SLAB_RED_ZONE) {
2634 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2635 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2636 }
2637 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002638 * Constructors are not allowed to allocate memory from the same
2639 * cache which they are a constructor for. Otherwise, deadlock.
2640 * They must also be threaded.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 */
2642 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002643 cachep->ctor(objp + obj_offset(cachep));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644
2645 if (cachep->flags & SLAB_RED_ZONE) {
2646 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2647 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002648 " end of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2650 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002651 " start of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 }
Andrew Mortona737b3e2006-03-22 00:08:11 -08002653 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2654 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002655 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002656 cachep->buffer_size / PAGE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657#else
2658 if (cachep->ctor)
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002659 cachep->ctor(objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660#endif
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002661 slab_bufctl(slabp)[i] = i + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002663 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664}
2665
Pekka Enberg343e0d72006-02-01 03:05:50 -08002666static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667{
Christoph Lameter4b51d662007-02-10 01:43:10 -08002668 if (CONFIG_ZONE_DMA_FLAG) {
2669 if (flags & GFP_DMA)
2670 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2671 else
2672 BUG_ON(cachep->gfpflags & GFP_DMA);
2673 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674}
2675
Andrew Mortona737b3e2006-03-22 00:08:11 -08002676static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2677 int nodeid)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002678{
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002679 void *objp = index_to_obj(cachep, slabp, slabp->free);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002680 kmem_bufctl_t next;
2681
2682 slabp->inuse++;
2683 next = slab_bufctl(slabp)[slabp->free];
2684#if DEBUG
2685 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2686 WARN_ON(slabp->nodeid != nodeid);
2687#endif
2688 slabp->free = next;
2689
2690 return objp;
2691}
2692
Andrew Mortona737b3e2006-03-22 00:08:11 -08002693static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2694 void *objp, int nodeid)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002695{
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002696 unsigned int objnr = obj_to_index(cachep, slabp, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002697
2698#if DEBUG
2699 /* Verify that the slab belongs to the intended node */
2700 WARN_ON(slabp->nodeid != nodeid);
2701
Al Viro871751e2006-03-25 03:06:39 -08002702 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
Matthew Dobson78d382d2006-02-01 03:05:47 -08002703 printk(KERN_ERR "slab: double free detected in cache "
Andrew Mortona737b3e2006-03-22 00:08:11 -08002704 "'%s', objp %p\n", cachep->name, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002705 BUG();
2706 }
2707#endif
2708 slab_bufctl(slabp)[objnr] = slabp->free;
2709 slabp->free = objnr;
2710 slabp->inuse--;
2711}
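/*
 * Illustrative sketch (not part of the original file): the bufctl array is
 * simply an index-linked free list.  cache_init_objs() links entry i to
 * i + 1 and terminates the chain with BUFCTL_END; slab_get_obj() pops the
 * head (slabp->free) and slab_put_obj() pushes an index back.  The
 * stand-alone user-space model below mimics that behaviour with plain
 * arrays; all names and sizes are invented for the example.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <assert.h>

#define EX_NUM		8		/* objects per "slab" */
#define EX_END		0xffffffffU	/* plays the role of BUFCTL_END */

static unsigned int ex_bufctl[EX_NUM];	/* free-list links */
static unsigned int ex_free;		/* index of first free object */

static void ex_init(void)		/* like cache_init_objs() */
{
	unsigned int i;

	for (i = 0; i < EX_NUM; i++)
		ex_bufctl[i] = i + 1;
	ex_bufctl[EX_NUM - 1] = EX_END;
	ex_free = 0;
}

static unsigned int ex_get(void)	/* like slab_get_obj() */
{
	unsigned int i = ex_free;

	assert(i != EX_END);		/* caller checked inuse < num */
	ex_free = ex_bufctl[i];
	return i;
}

static void ex_put(unsigned int i)	/* like slab_put_obj() */
{
	ex_bufctl[i] = ex_free;
	ex_free = i;
}
#endif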
2712
Pekka Enberg47768742006-06-23 02:03:07 -07002713/*
2714 * Map pages beginning at addr to the given cache and slab. This is required
2715 * for the slab allocator to be able to lookup the cache and slab of a
2716 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2717 */
2718static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2719 void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720{
Pekka Enberg47768742006-06-23 02:03:07 -07002721 int nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 struct page *page;
2723
Pekka Enberg47768742006-06-23 02:03:07 -07002724 page = virt_to_page(addr);
Nick Piggin84097512006-03-22 00:08:34 -08002725
Pekka Enberg47768742006-06-23 02:03:07 -07002726 nr_pages = 1;
Nick Piggin84097512006-03-22 00:08:34 -08002727 if (likely(!PageCompound(page)))
Pekka Enberg47768742006-06-23 02:03:07 -07002728 nr_pages <<= cache->gfporder;
2729
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 do {
Pekka Enberg47768742006-06-23 02:03:07 -07002731 page_set_cache(page, cache);
2732 page_set_slab(page, slab);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 page++;
Pekka Enberg47768742006-06-23 02:03:07 -07002734 } while (--nr_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735}
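/*
 * Illustrative sketch (not part of the original file): once slab_map_pages()
 * has stamped every struct page, the owning cache and slab can be recovered
 * from any object address.  This is roughly what virt_to_cache() and
 * virt_to_slab() do on behalf of kfree() and the debug checks; the helper
 * name below is invented for the example.
 */
#if 0	/* example only, never compiled */
static struct kmem_cache *example_owner_of(const void *objp)
{
	struct page *page = virt_to_head_page(objp);

	return page_get_cache(page);	/* set by slab_map_pages() */
}
#endif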
2736
2737/*
2738 * Grow (by 1) the number of slabs within a cache. This is called by
2739 * kmem_cache_alloc() when there are no active objs left in a cache.
2740 */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002741static int cache_grow(struct kmem_cache *cachep,
2742 gfp_t flags, int nodeid, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002744 struct slab *slabp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002745 size_t offset;
2746 gfp_t local_flags;
Christoph Lametere498be72005-09-09 13:03:32 -07002747 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748
Andrew Mortona737b3e2006-03-22 00:08:11 -08002749 /*
2750 * Be lazy and only check for valid flags here, keeping it out of the
2751 * critical path in kmem_cache_alloc().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 */
Christoph Lameter6cb06222007-10-16 01:25:41 -07002753 BUG_ON(flags & GFP_SLAB_BUG_MASK);
2754 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002756 /* Take the l3 list lock to change the colour_next on this node */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 check_irq_off();
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002758 l3 = cachep->nodelists[nodeid];
2759 spin_lock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760
2761 /* Get colour for the slab, and calculate the next value. */
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002762 offset = l3->colour_next;
2763 l3->colour_next++;
2764 if (l3->colour_next >= cachep->colour)
2765 l3->colour_next = 0;
2766 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002768 offset *= cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769
2770 if (local_flags & __GFP_WAIT)
2771 local_irq_enable();
2772
2773 /*
2774 * The test for missing atomic flag is performed here, rather than
2775 * the more obvious place, simply to reduce the critical path length
2776 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2777 * will eventually be caught here (where it matters).
2778 */
2779 kmem_flagcheck(cachep, flags);
2780
Andrew Mortona737b3e2006-03-22 00:08:11 -08002781 /*
2782 * Get mem for the objs. Attempt to allocate a physical page from
2783 * 'nodeid'.
Christoph Lametere498be72005-09-09 13:03:32 -07002784 */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002785 if (!objp)
Andrew Mortonb8c1c5d2007-07-24 12:02:40 -07002786 objp = kmem_getpages(cachep, local_flags, nodeid);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002787 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 goto failed;
2789
2790 /* Get slab management. */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002791 slabp = alloc_slabmgmt(cachep, objp, offset,
Christoph Lameter6cb06222007-10-16 01:25:41 -07002792 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002793 if (!slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 goto opps1;
2795
Pekka Enberg47768742006-06-23 02:03:07 -07002796 slab_map_pages(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
Christoph Lametera35afb82007-05-16 22:10:57 -07002798 cache_init_objs(cachep, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
2800 if (local_flags & __GFP_WAIT)
2801 local_irq_disable();
2802 check_irq_off();
Christoph Lametere498be72005-09-09 13:03:32 -07002803 spin_lock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
2805 /* Make slab active. */
Christoph Lametere498be72005-09-09 13:03:32 -07002806 list_add_tail(&slabp->list, &(l3->slabs_free));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 STATS_INC_GROWN(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07002808 l3->free_objects += cachep->num;
2809 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002811opps1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 kmem_freepages(cachep, objp);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002813failed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 if (local_flags & __GFP_WAIT)
2815 local_irq_disable();
2816 return 0;
2817}
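/*
 * Worked example of the colouring arithmetic above (numbers are purely
 * illustrative): with cachep->colour == 4 and cachep->colour_off == 64
 * (a typical L1 cache-line size), l3->colour_next cycles 0, 1, 2, 3, 0, ...
 * so successive slabs on the node place their first object at byte offsets
 * 0, 64, 128, 192, 0, ... from the start of the page run, spreading the
 * hot first objects across different cache lines.
 */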
2818
2819#if DEBUG
2820
2821/*
2822 * Perform extra freeing checks:
2823 * - detect bad pointers.
2824 * - POISON/RED_ZONE checking
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 */
2826static void kfree_debugcheck(const void *objp)
2827{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 if (!virt_addr_valid(objp)) {
2829 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002830 (unsigned long)objp);
2831 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833}
2834
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002835static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2836{
David Woodhouseb46b8f12007-05-08 00:22:59 -07002837 unsigned long long redzone1, redzone2;
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002838
2839 redzone1 = *dbg_redzone1(cache, obj);
2840 redzone2 = *dbg_redzone2(cache, obj);
2841
2842 /*
2843 * Redzone is ok.
2844 */
2845 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2846 return;
2847
2848 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2849 slab_error(cache, "double free detected");
2850 else
2851 slab_error(cache, "memory outside object was overwritten");
2852
David Woodhouseb46b8f12007-05-08 00:22:59 -07002853 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002854 obj, redzone1, redzone2);
2855}
2856
Pekka Enberg343e0d72006-02-01 03:05:50 -08002857static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002858 void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859{
2860 struct page *page;
2861 unsigned int objnr;
2862 struct slab *slabp;
2863
Matthew Wilcox80cbd912007-11-29 12:05:13 -07002864 BUG_ON(virt_to_cache(objp) != cachep);
2865
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002866 objp -= obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 kfree_debugcheck(objp);
Christoph Lameterb49af682007-05-06 14:49:41 -07002868 page = virt_to_head_page(objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
Pekka Enberg065d41c2005-11-13 16:06:46 -08002870 slabp = page_get_slab(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871
2872 if (cachep->flags & SLAB_RED_ZONE) {
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002873 verify_redzone_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2875 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2876 }
2877 if (cachep->flags & SLAB_STORE_USER)
2878 *dbg_userword(cachep, objp) = caller;
2879
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002880 objnr = obj_to_index(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881
2882 BUG_ON(objnr >= cachep->num);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002883 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884
Al Viro871751e2006-03-25 03:06:39 -08002885#ifdef CONFIG_DEBUG_SLAB_LEAK
2886 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2887#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 if (cachep->flags & SLAB_POISON) {
2889#ifdef CONFIG_DEBUG_PAGEALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -08002890 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 store_stackinfo(cachep, objp, (unsigned long)caller);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002892 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002893 cachep->buffer_size / PAGE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 } else {
2895 poison_obj(cachep, objp, POISON_FREE);
2896 }
2897#else
2898 poison_obj(cachep, objp, POISON_FREE);
2899#endif
2900 }
2901 return objp;
2902}
2903
Pekka Enberg343e0d72006-02-01 03:05:50 -08002904static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905{
2906 kmem_bufctl_t i;
2907 int entries = 0;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002908
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 /* Check slab's freelist to see if this obj is there. */
2910 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2911 entries++;
2912 if (entries > cachep->num || i >= cachep->num)
2913 goto bad;
2914 }
2915 if (entries != cachep->num - slabp->inuse) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002916bad:
2917 printk(KERN_ERR "slab: Internal list corruption detected in "
2918 "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2919 cachep->name, cachep->num, slabp, slabp->inuse);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002920 for (i = 0;
Linus Torvalds264132b2006-03-06 12:10:07 -08002921 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002922 i++) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002923 if (i % 16 == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 printk("\n%03x:", i);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002925 printk(" %02x", ((unsigned char *)slabp)[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 }
2927 printk("\n");
2928 BUG();
2929 }
2930}
2931#else
2932#define kfree_debugcheck(x) do { } while(0)
2933#define cache_free_debugcheck(x,objp,z) (objp)
2934#define check_slabp(x,y) do { } while(0)
2935#endif
2936
Pekka Enberg343e0d72006-02-01 03:05:50 -08002937static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938{
2939 int batchcount;
2940 struct kmem_list3 *l3;
2941 struct array_cache *ac;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002942 int node;
2943
Andrew Mortona737b3e2006-03-22 00:08:11 -08002944retry:
Joe Korty6d2144d2008-03-05 15:04:59 -08002945 check_irq_off();
2946 node = numa_node_id();
2947 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 batchcount = ac->batchcount;
2949 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002950 /*
2951 * If there was little recent activity on this cache, then
2952 * perform only a partial refill. Otherwise we could generate
2953 * refill bouncing.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 */
2955 batchcount = BATCHREFILL_LIMIT;
2956 }
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002957 l3 = cachep->nodelists[node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958
Christoph Lametere498be72005-09-09 13:03:32 -07002959 BUG_ON(ac->avail > 0 || !l3);
2960 spin_lock(&l3->list_lock);
2961
Christoph Lameter3ded1752006-03-25 03:06:44 -08002962 /* See if we can refill from the shared array */
Nick Piggin44b57f12010-01-27 22:27:40 +11002963 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
2964 l3->shared->touched = 1;
Christoph Lameter3ded1752006-03-25 03:06:44 -08002965 goto alloc_done;
Nick Piggin44b57f12010-01-27 22:27:40 +11002966 }
Christoph Lameter3ded1752006-03-25 03:06:44 -08002967
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 while (batchcount > 0) {
2969 struct list_head *entry;
2970 struct slab *slabp;
2971 /* Get the slab the allocation is to come from. */
2972 entry = l3->slabs_partial.next;
2973 if (entry == &l3->slabs_partial) {
2974 l3->free_touched = 1;
2975 entry = l3->slabs_free.next;
2976 if (entry == &l3->slabs_free)
2977 goto must_grow;
2978 }
2979
2980 slabp = list_entry(entry, struct slab, list);
2981 check_slabp(cachep, slabp);
2982 check_spinlock_acquired(cachep);
Pekka Enberg714b8172007-05-06 14:49:03 -07002983
2984 /*
2985 * The slab was either on partial or free list so
2986 * there must be at least one object available for
2987 * allocation.
2988 */
roel kluin249b9f32008-10-29 17:18:07 -04002989 BUG_ON(slabp->inuse >= cachep->num);
Pekka Enberg714b8172007-05-06 14:49:03 -07002990
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 while (slabp->inuse < cachep->num && batchcount--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 STATS_INC_ALLOCED(cachep);
2993 STATS_INC_ACTIVE(cachep);
2994 STATS_SET_HIGH(cachep);
2995
Matthew Dobson78d382d2006-02-01 03:05:47 -08002996 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002997 node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 }
2999 check_slabp(cachep, slabp);
3000
3001 /* move slabp to correct slabp list: */
3002 list_del(&slabp->list);
3003 if (slabp->free == BUFCTL_END)
3004 list_add(&slabp->list, &l3->slabs_full);
3005 else
3006 list_add(&slabp->list, &l3->slabs_partial);
3007 }
3008
Andrew Mortona737b3e2006-03-22 00:08:11 -08003009must_grow:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 l3->free_objects -= ac->avail;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003011alloc_done:
Christoph Lametere498be72005-09-09 13:03:32 -07003012 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013
3014 if (unlikely(!ac->avail)) {
3015 int x;
Christoph Lameter3c517a62006-12-06 20:33:29 -08003016 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07003017
Andrew Mortona737b3e2006-03-22 00:08:11 -08003018 /* cache_grow can reenable interrupts, then ac could change. */
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003019 ac = cpu_cache_get(cachep);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003020 if (!x && ac->avail == 0) /* no objects in sight? abort */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 return NULL;
3022
Andrew Mortona737b3e2006-03-22 00:08:11 -08003023 if (!ac->avail) /* objects refilled by interrupt? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 goto retry;
3025 }
3026 ac->touched = 1;
Christoph Lametere498be72005-09-09 13:03:32 -07003027 return ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028}
3029
Andrew Mortona737b3e2006-03-22 00:08:11 -08003030static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3031 gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032{
3033 might_sleep_if(flags & __GFP_WAIT);
3034#if DEBUG
3035 kmem_flagcheck(cachep, flags);
3036#endif
3037}
3038
3039#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003040static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3041 gfp_t flags, void *objp, void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003043 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 return objp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003045 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046#ifdef CONFIG_DEBUG_PAGEALLOC
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003047 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003048 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003049 cachep->buffer_size / PAGE_SIZE, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 else
3051 check_poison_obj(cachep, objp);
3052#else
3053 check_poison_obj(cachep, objp);
3054#endif
3055 poison_obj(cachep, objp, POISON_INUSE);
3056 }
3057 if (cachep->flags & SLAB_STORE_USER)
3058 *dbg_userword(cachep, objp) = caller;
3059
3060 if (cachep->flags & SLAB_RED_ZONE) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08003061 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3062 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3063 slab_error(cachep, "double free, or memory outside"
3064 " object was overwritten");
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003065 printk(KERN_ERR
David Woodhouseb46b8f12007-05-08 00:22:59 -07003066 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08003067 objp, *dbg_redzone1(cachep, objp),
3068 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 }
3070 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3071 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3072 }
Al Viro871751e2006-03-25 03:06:39 -08003073#ifdef CONFIG_DEBUG_SLAB_LEAK
3074 {
3075 struct slab *slabp;
3076 unsigned objnr;
3077
Christoph Lameterb49af682007-05-06 14:49:41 -07003078 slabp = page_get_slab(virt_to_head_page(objp));
Al Viro871751e2006-03-25 03:06:39 -08003079 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3080 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3081 }
3082#endif
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003083 objp += obj_offset(cachep);
Christoph Lameter4f104932007-05-06 14:50:17 -07003084 if (cachep->ctor && cachep->flags & SLAB_POISON)
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003085 cachep->ctor(objp);
Kevin Hilmana44b56d2006-12-06 20:32:11 -08003086#if ARCH_SLAB_MINALIGN
3087 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3088 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3089 objp, ARCH_SLAB_MINALIGN);
3090 }
3091#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 return objp;
3093}
3094#else
3095#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3096#endif
3097
Akinobu Mita773ff602008-12-23 19:37:01 +09003098static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003099{
3100 if (cachep == &cache_cache)
Akinobu Mita773ff602008-12-23 19:37:01 +09003101 return false;
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003102
Dmitry Monakhov4c13dd32010-02-26 09:36:12 +03003103 return should_failslab(obj_size(cachep), flags, cachep->flags);
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003104}
3105
Pekka Enberg343e0d72006-02-01 03:05:50 -08003106static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003108 void *objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 struct array_cache *ac;
3110
Alok N Kataria5c382302005-09-27 21:45:46 -07003111 check_irq_off();
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003112
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003113 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 if (likely(ac->avail)) {
3115 STATS_INC_ALLOCHIT(cachep);
3116 ac->touched = 1;
Christoph Lametere498be72005-09-09 13:03:32 -07003117 objp = ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 } else {
3119 STATS_INC_ALLOCMISS(cachep);
3120 objp = cache_alloc_refill(cachep, flags);
J. R. Okajimaddbf2e82009-12-02 16:55:50 +09003121 /*
3122 * the 'ac' may be updated by cache_alloc_refill(),
3123 * and kmemleak_erase() requires its correct value.
3124 */
3125 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 }
Catalin Marinasd5cff632009-06-11 13:22:40 +01003127 /*
3128 * To avoid a false negative, if an object that is in one of the
3129 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3130 * treat the array pointers as a reference to the object.
3131 */
J. R. Okajimaf3d8b532009-12-02 16:55:49 +09003132 if (objp)
3133 kmemleak_erase(&ac->entry[ac->avail]);
Alok N Kataria5c382302005-09-27 21:45:46 -07003134 return objp;
3135}
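/*
 * Illustrative sketch (not part of the original file): the fast path above
 * is nothing more than a per-cpu LIFO stack of object pointers.  The
 * stand-alone model below shows the pop (allocation hit) and push (free
 * hit) operations on such an array cache; the names and the fixed limit
 * are invented for the example.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stddef.h>

#define EX_LIMIT 16

struct ex_array_cache {
	unsigned int avail;		/* objects currently stacked */
	void *entry[EX_LIMIT];		/* LIFO stack of free objects */
};

static void *ex_alloc_fast(struct ex_array_cache *ac)
{
	if (ac->avail)
		return ac->entry[--ac->avail];	/* allocation hit */
	return NULL;	/* miss: the kernel would call cache_alloc_refill() */
}

static int ex_free_fast(struct ex_array_cache *ac, void *objp)
{
	if (ac->avail < EX_LIMIT) {
		ac->entry[ac->avail++] = objp;	/* free hit */
		return 1;
	}
	return 0;	/* miss: the kernel would call cache_flusharray() */
}
#endif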
3136
Christoph Lametere498be72005-09-09 13:03:32 -07003137#ifdef CONFIG_NUMA
3138/*
Paul Jacksonb2455392006-03-24 03:16:12 -08003139 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
Paul Jacksonc61afb12006-03-24 03:16:08 -08003140 *
3141 * If we are in_interrupt, then process context, including cpusets and
3142 * mempolicy, may not apply and should not be used for allocation policy.
3143 */
3144static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3145{
3146 int nid_alloc, nid_here;
3147
Christoph Lameter765c4502006-09-27 01:50:08 -07003148 if (in_interrupt() || (flags & __GFP_THISNODE))
Paul Jacksonc61afb12006-03-24 03:16:08 -08003149 return NULL;
3150 nid_alloc = nid_here = numa_node_id();
3151 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3152 nid_alloc = cpuset_mem_spread_node();
3153 else if (current->mempolicy)
3154 nid_alloc = slab_node(current->mempolicy);
3155 if (nid_alloc != nid_here)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003156 return ____cache_alloc_node(cachep, flags, nid_alloc);
Paul Jacksonc61afb12006-03-24 03:16:08 -08003157 return NULL;
3158}
3159
3160/*
Christoph Lameter765c4502006-09-27 01:50:08 -07003161 * Fallback function if there was no memory available and no objects on a
Christoph Lameter3c517a62006-12-06 20:33:29 -08003162 * certain node and falling back is permitted. First we scan all the
3163 * available nodelists for available objects. If that fails then we
3164 * perform an allocation without specifying a node. This allows the page
3165 * allocator to do its reclaim / fallback magic. We then insert the
3166 * slab into the proper nodelist and then allocate from it.
Christoph Lameter765c4502006-09-27 01:50:08 -07003167 */
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003168static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
Christoph Lameter765c4502006-09-27 01:50:08 -07003169{
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003170 struct zonelist *zonelist;
3171 gfp_t local_flags;
Mel Gormandd1a2392008-04-28 02:12:17 -07003172 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07003173 struct zone *zone;
3174 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07003175 void *obj = NULL;
Christoph Lameter3c517a62006-12-06 20:33:29 -08003176 int nid;
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003177
3178 if (flags & __GFP_THISNODE)
3179 return NULL;
3180
Mel Gorman0e884602008-04-28 02:12:14 -07003181 zonelist = node_zonelist(slab_node(current->mempolicy), flags);
Christoph Lameter6cb06222007-10-16 01:25:41 -07003182 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Christoph Lameter765c4502006-09-27 01:50:08 -07003183
Christoph Lameter3c517a62006-12-06 20:33:29 -08003184retry:
3185 /*
3186 * Look through allowed nodes for objects available
3187 * from existing per node queues.
3188 */
Mel Gorman54a6eb52008-04-28 02:12:16 -07003189 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3190 nid = zone_to_nid(zone);
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003191
Mel Gorman54a6eb52008-04-28 02:12:16 -07003192 if (cpuset_zone_allowed_hardwall(zone, flags) &&
Christoph Lameter3c517a62006-12-06 20:33:29 -08003193 cache->nodelists[nid] &&
Christoph Lameter481c5342008-06-21 16:46:35 -07003194 cache->nodelists[nid]->free_objects) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003195 obj = ____cache_alloc_node(cache,
3196 flags | GFP_THISNODE, nid);
Christoph Lameter481c5342008-06-21 16:46:35 -07003197 if (obj)
3198 break;
3199 }
Christoph Lameter3c517a62006-12-06 20:33:29 -08003200 }
3201
Christoph Lametercfce6602007-05-06 14:50:17 -07003202 if (!obj) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003203 /*
3204 * This allocation will be performed within the constraints
3205 * of the current cpuset / memory policy requirements.
3206 * We may trigger various forms of reclaim on the allowed
3207 * set and go into memory reserves if necessary.
3208 */
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003209 if (local_flags & __GFP_WAIT)
3210 local_irq_enable();
3211 kmem_flagcheck(cache, flags);
Mel Gorman6484eb32009-06-16 15:31:54 -07003212 obj = kmem_getpages(cache, local_flags, numa_node_id());
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003213 if (local_flags & __GFP_WAIT)
3214 local_irq_disable();
Christoph Lameter3c517a62006-12-06 20:33:29 -08003215 if (obj) {
3216 /*
3217 * Insert into the appropriate per node queues
3218 */
3219 nid = page_to_nid(virt_to_page(obj));
3220 if (cache_grow(cache, flags, nid, obj)) {
3221 obj = ____cache_alloc_node(cache,
3222 flags | GFP_THISNODE, nid);
3223 if (!obj)
3224 /*
3225 * Another processor may allocate the
3226 * objects in the slab since we are
3227 * not holding any locks.
3228 */
3229 goto retry;
3230 } else {
Hugh Dickinsb6a60452007-01-05 16:36:36 -08003231 /* cache_grow already freed obj */
Christoph Lameter3c517a62006-12-06 20:33:29 -08003232 obj = NULL;
3233 }
3234 }
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003235 }
Christoph Lameter765c4502006-09-27 01:50:08 -07003236 return obj;
3237}
3238
3239/*
Christoph Lametere498be72005-09-09 13:03:32 -07003240 * An interface to enable slab creation on nodeid
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003242static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003243 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07003244{
3245 struct list_head *entry;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003246 struct slab *slabp;
3247 struct kmem_list3 *l3;
3248 void *obj;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003249 int x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003251 l3 = cachep->nodelists[nodeid];
3252 BUG_ON(!l3);
Christoph Lametere498be72005-09-09 13:03:32 -07003253
Andrew Mortona737b3e2006-03-22 00:08:11 -08003254retry:
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08003255 check_irq_off();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003256 spin_lock(&l3->list_lock);
3257 entry = l3->slabs_partial.next;
3258 if (entry == &l3->slabs_partial) {
3259 l3->free_touched = 1;
3260 entry = l3->slabs_free.next;
3261 if (entry == &l3->slabs_free)
3262 goto must_grow;
3263 }
Christoph Lametere498be72005-09-09 13:03:32 -07003264
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003265 slabp = list_entry(entry, struct slab, list);
3266 check_spinlock_acquired_node(cachep, nodeid);
3267 check_slabp(cachep, slabp);
Christoph Lametere498be72005-09-09 13:03:32 -07003268
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003269 STATS_INC_NODEALLOCS(cachep);
3270 STATS_INC_ACTIVE(cachep);
3271 STATS_SET_HIGH(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003272
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003273 BUG_ON(slabp->inuse == cachep->num);
Christoph Lametere498be72005-09-09 13:03:32 -07003274
Matthew Dobson78d382d2006-02-01 03:05:47 -08003275 obj = slab_get_obj(cachep, slabp, nodeid);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003276 check_slabp(cachep, slabp);
3277 l3->free_objects--;
3278 /* move slabp to correct slabp list: */
3279 list_del(&slabp->list);
Christoph Lametere498be72005-09-09 13:03:32 -07003280
Andrew Mortona737b3e2006-03-22 00:08:11 -08003281 if (slabp->free == BUFCTL_END)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003282 list_add(&slabp->list, &l3->slabs_full);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003283 else
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003284 list_add(&slabp->list, &l3->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07003285
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003286 spin_unlock(&l3->list_lock);
3287 goto done;
Christoph Lametere498be72005-09-09 13:03:32 -07003288
Andrew Mortona737b3e2006-03-22 00:08:11 -08003289must_grow:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003290 spin_unlock(&l3->list_lock);
Christoph Lameter3c517a62006-12-06 20:33:29 -08003291 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
Christoph Lameter765c4502006-09-27 01:50:08 -07003292 if (x)
3293 goto retry;
Christoph Lametere498be72005-09-09 13:03:32 -07003294
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003295 return fallback_alloc(cachep, flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07003296
Andrew Mortona737b3e2006-03-22 00:08:11 -08003297done:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003298 return obj;
Christoph Lametere498be72005-09-09 13:03:32 -07003299}
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003300
3301/**
3302 * kmem_cache_alloc_node - Allocate an object on the specified node
3303 * @cachep: The cache to allocate from.
3304 * @flags: See kmalloc().
3305 * @nodeid: node number of the target node.
3306 * @caller: return address of caller, used for debug information
3307 *
3308 * Identical to kmem_cache_alloc but it will allocate memory on the given
3309 * node, which can improve the performance for cpu bound structures.
3310 *
3311 * Fallback to other node is possible if __GFP_THISNODE is not set.
3312 */
3313static __always_inline void *
3314__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3315 void *caller)
3316{
3317 unsigned long save_flags;
3318 void *ptr;
3319
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003320 flags &= gfp_allowed_mask;
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003321
Nick Piggincf40bd12009-01-21 08:12:39 +01003322 lockdep_trace_alloc(flags);
3323
Akinobu Mita773ff602008-12-23 19:37:01 +09003324 if (slab_should_failslab(cachep, flags))
Akinobu Mita824ebef2007-05-06 14:49:58 -07003325 return NULL;
3326
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003327 cache_alloc_debugcheck_before(cachep, flags);
3328 local_irq_save(save_flags);
3329
Tim Blechmann8e15b792009-11-30 18:59:34 +01003330 if (nodeid == -1)
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003331 nodeid = numa_node_id();
3332
3333 if (unlikely(!cachep->nodelists[nodeid])) {
3334 /* Node not bootstrapped yet */
3335 ptr = fallback_alloc(cachep, flags);
3336 goto out;
3337 }
3338
3339 if (nodeid == numa_node_id()) {
3340 /*
3341 * Use the locally cached objects if possible.
3342 * However ____cache_alloc does not allow fallback
3343 * to other nodes. It may fail while we still have
3344 * objects on other nodes available.
3345 */
3346 ptr = ____cache_alloc(cachep, flags);
3347 if (ptr)
3348 goto out;
3349 }
3350 /* ___cache_alloc_node can fall back to other nodes */
3351 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3352 out:
3353 local_irq_restore(save_flags);
3354 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
Catalin Marinasd5cff632009-06-11 13:22:40 +01003355 kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
3356 flags);
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003357
Pekka Enbergc175eea2008-05-09 20:35:53 +02003358 if (likely(ptr))
3359 kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
3360
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003361 if (unlikely((flags & __GFP_ZERO) && ptr))
3362 memset(ptr, 0, obj_size(cachep));
3363
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003364 return ptr;
3365}
3366
3367static __always_inline void *
3368__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3369{
3370 void *objp;
3371
3372 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3373 objp = alternate_node_alloc(cache, flags);
3374 if (objp)
3375 goto out;
3376 }
3377 objp = ____cache_alloc(cache, flags);
3378
3379 /*
3380 * We may just have run out of memory on the local node.
3381 * ____cache_alloc_node() knows how to locate memory on other nodes
3382 */
3383 if (!objp)
3384 objp = ____cache_alloc_node(cache, flags, numa_node_id());
3385
3386 out:
3387 return objp;
3388}
3389#else
3390
3391static __always_inline void *
3392__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3393{
3394 return ____cache_alloc(cachep, flags);
3395}
3396
3397#endif /* CONFIG_NUMA */
3398
3399static __always_inline void *
3400__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3401{
3402 unsigned long save_flags;
3403 void *objp;
3404
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003405 flags &= gfp_allowed_mask;
Pekka Enberg7e85ee02009-06-12 14:03:06 +03003406
Nick Piggincf40bd12009-01-21 08:12:39 +01003407 lockdep_trace_alloc(flags);
3408
Akinobu Mita773ff602008-12-23 19:37:01 +09003409 if (slab_should_failslab(cachep, flags))
Akinobu Mita824ebef2007-05-06 14:49:58 -07003410 return NULL;
3411
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003412 cache_alloc_debugcheck_before(cachep, flags);
3413 local_irq_save(save_flags);
3414 objp = __do_cache_alloc(cachep, flags);
3415 local_irq_restore(save_flags);
3416 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
Catalin Marinasd5cff632009-06-11 13:22:40 +01003417 kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
3418 flags);
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003419 prefetchw(objp);
3420
Pekka Enbergc175eea2008-05-09 20:35:53 +02003421 if (likely(objp))
3422 kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
3423
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003424 if (unlikely((flags & __GFP_ZERO) && objp))
3425 memset(objp, 0, obj_size(cachep));
3426
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003427 return objp;
3428}
Christoph Lametere498be72005-09-09 13:03:32 -07003429
3430/*
3431 * Caller needs to acquire the correct kmem_list3's list_lock
3432 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003433static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003434 int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435{
3436 int i;
Christoph Lametere498be72005-09-09 13:03:32 -07003437 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438
3439 for (i = 0; i < nr_objects; i++) {
3440 void *objp = objpp[i];
3441 struct slab *slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08003443 slabp = virt_to_slab(objp);
Christoph Lameterff694162005-09-22 21:44:02 -07003444 l3 = cachep->nodelists[node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 list_del(&slabp->list);
Christoph Lameterff694162005-09-22 21:44:02 -07003446 check_spinlock_acquired_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 check_slabp(cachep, slabp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08003448 slab_put_obj(cachep, slabp, objp, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 STATS_DEC_ACTIVE(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003450 l3->free_objects++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451 check_slabp(cachep, slabp);
3452
3453 /* fixup slab chains */
3454 if (slabp->inuse == 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07003455 if (l3->free_objects > l3->free_limit) {
3456 l3->free_objects -= cachep->num;
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07003457 /* No need to drop any previously held
3458 * lock here; even if we have an off-slab slab
3459 * descriptor, it is guaranteed to come from
3460 * a different cache, refer to comments before
3461 * alloc_slabmgmt.
3462 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 slab_destroy(cachep, slabp);
3464 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07003465 list_add(&slabp->list, &l3->slabs_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 }
3467 } else {
3468 /* Unconditionally move a slab to the end of the
3469 * partial list on free - maximum time for the
3470 * other objects to be freed, too.
3471 */
Christoph Lametere498be72005-09-09 13:03:32 -07003472 list_add_tail(&slabp->list, &l3->slabs_partial);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 }
3474 }
3475}
3476
Pekka Enberg343e0d72006-02-01 03:05:50 -08003477static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478{
3479 int batchcount;
Christoph Lametere498be72005-09-09 13:03:32 -07003480 struct kmem_list3 *l3;
Christoph Lameterff694162005-09-22 21:44:02 -07003481 int node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482
3483 batchcount = ac->batchcount;
3484#if DEBUG
3485 BUG_ON(!batchcount || batchcount > ac->avail);
3486#endif
3487 check_irq_off();
Christoph Lameterff694162005-09-22 21:44:02 -07003488 l3 = cachep->nodelists[node];
Ingo Molnar873623d2006-07-13 14:44:38 +02003489 spin_lock(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07003490 if (l3->shared) {
3491 struct array_cache *shared_array = l3->shared;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003492 int max = shared_array->limit - shared_array->avail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 if (max) {
3494 if (batchcount > max)
3495 batchcount = max;
Christoph Lametere498be72005-09-09 13:03:32 -07003496 memcpy(&(shared_array->entry[shared_array->avail]),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003497 ac->entry, sizeof(void *) * batchcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 shared_array->avail += batchcount;
3499 goto free_done;
3500 }
3501 }
3502
Christoph Lameterff694162005-09-22 21:44:02 -07003503 free_block(cachep, ac->entry, batchcount, node);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003504free_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505#if STATS
3506 {
3507 int i = 0;
3508 struct list_head *p;
3509
Christoph Lametere498be72005-09-09 13:03:32 -07003510 p = l3->slabs_free.next;
3511 while (p != &(l3->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 struct slab *slabp;
3513
3514 slabp = list_entry(p, struct slab, list);
3515 BUG_ON(slabp->inuse);
3516
3517 i++;
3518 p = p->next;
3519 }
3520 STATS_SET_FREEABLE(cachep, i);
3521 }
3522#endif
Christoph Lametere498be72005-09-09 13:03:32 -07003523 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 ac->avail -= batchcount;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003525 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526}
3527
3528/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08003529 * Release an obj back to its cache. If the obj has a constructed state, it must
3530 * be in this state _before_ it is released. Called with disabled ints.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531 */
Ingo Molnar873623d2006-07-13 14:44:38 +02003532static inline void __cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533{
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003534 struct array_cache *ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
3536 check_irq_off();
Catalin Marinasd5cff632009-06-11 13:22:40 +01003537 kmemleak_free_recursive(objp, cachep->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3539
Pekka Enbergc175eea2008-05-09 20:35:53 +02003540 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3541
Siddha, Suresh B1807a1a2007-08-22 14:01:49 -07003542 /*
3543 * Skip calling cache_free_alien() when the platform is not numa.
3544 * This will avoid cache misses that happen while accessing slabp (which
3545 * is a per-page memory reference) to get nodeid. Instead use a global
3546 * variable to skip the call, which is most likely to be present in
3547 * the cache.
3548 */
Mel Gormanb6e68bc2009-06-16 15:32:16 -07003549 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
Pekka Enberg729bd0b2006-06-23 02:03:05 -07003550 return;
Christoph Lametere498be72005-09-09 13:03:32 -07003551
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 if (likely(ac->avail < ac->limit)) {
3553 STATS_INC_FREEHIT(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003554 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 return;
3556 } else {
3557 STATS_INC_FREEMISS(cachep);
3558 cache_flusharray(cachep, ac);
Christoph Lametere498be72005-09-09 13:03:32 -07003559 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 }
3561}
3562
3563/**
3564 * kmem_cache_alloc - Allocate an object
3565 * @cachep: The cache to allocate from.
3566 * @flags: See kmalloc().
3567 *
3568 * Allocate an object from this cache. The flags are only relevant
3569 * if the cache has no available objects.
3570 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003571void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572{
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003573 void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3574
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003575 trace_kmem_cache_alloc(_RET_IP_, ret,
3576 obj_size(cachep), cachep->buffer_size, flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003577
3578 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579}
3580EXPORT_SYMBOL(kmem_cache_alloc);
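/*
 * Illustrative sketch (not part of the original file): because the common
 * allocation path honours __GFP_ZERO, a zeroed object can be obtained
 * without an explicit memset at the call site -- this is essentially what
 * the kmem_cache_zalloc() wrapper relies on.  The helper name is invented.
 */
#if 0	/* example only, never compiled */
static void *example_zeroed_alloc(struct kmem_cache *cachep)
{
	/* equivalent to kmem_cache_zalloc(cachep, GFP_KERNEL) */
	return kmem_cache_alloc(cachep, GFP_KERNEL | __GFP_ZERO);
}
#endif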
3581
Li Zefan0f24f122009-12-11 15:45:30 +08003582#ifdef CONFIG_TRACING
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003583void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
3584{
3585 return __cache_alloc(cachep, flags, __builtin_return_address(0));
3586}
3587EXPORT_SYMBOL(kmem_cache_alloc_notrace);
3588#endif
3589
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590/**
Randy Dunlap76824862008-03-19 17:00:40 -07003591 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592 * @cachep: the cache we're checking against
3593 * @ptr: pointer to validate
3594 *
Randy Dunlap76824862008-03-19 17:00:40 -07003595 * This verifies that the untrusted pointer looks sane;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 * it is _not_ a guarantee that the pointer is actually
3597 * part of the slab cache in question, but it at least
3598 * validates that the pointer can be dereferenced and
3599 * looks half-way sane.
3600 *
3601 * Currently only used for dentry validation.
3602 */
Christoph Lameterb7f869a2006-12-22 01:06:44 -08003603int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003605 unsigned long addr = (unsigned long)ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 unsigned long min_addr = PAGE_OFFSET;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003607 unsigned long align_mask = BYTES_PER_WORD - 1;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003608 unsigned long size = cachep->buffer_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 struct page *page;
3610
3611 if (unlikely(addr < min_addr))
3612 goto out;
3613 if (unlikely(addr > (unsigned long)high_memory - size))
3614 goto out;
3615 if (unlikely(addr & align_mask))
3616 goto out;
3617 if (unlikely(!kern_addr_valid(addr)))
3618 goto out;
3619 if (unlikely(!kern_addr_valid(addr + size - 1)))
3620 goto out;
3621 page = virt_to_page(ptr);
3622 if (unlikely(!PageSlab(page)))
3623 goto out;
Pekka Enberg065d41c2005-11-13 16:06:46 -08003624 if (unlikely(page_get_cache(page) != cachep))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 goto out;
3626 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003627out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628 return 0;
3629}
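/*
 * Example (editor's sketch, not part of the original file): how a caller
 * such as the dcache might use kmem_ptr_validate() before trusting a
 * pointer it did not allocate itself. "dentry_cache" and "dentry" stand in
 * for the cache and the pointer being checked.
 *
 *	if (!kmem_ptr_validate(dentry_cache, dentry))
 *		goto out;
 *	... only now is it reasonably safe to dereference dentry ...
 */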
3630
3631#ifdef CONFIG_NUMA
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003632void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3633{
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003634 void *ret = __cache_alloc_node(cachep, flags, nodeid,
3635 __builtin_return_address(0));
3636
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003637 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3638 obj_size(cachep), cachep->buffer_size,
3639 flags, nodeid);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003640
3641 return ret;
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003642}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643EXPORT_SYMBOL(kmem_cache_alloc_node);
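/*
 * Example (editor's sketch, not part of the original file): allocating an
 * object on a specific NUMA node, e.g. close to the CPU or device that will
 * use it. "foo_cache" is the hypothetical cache from the earlier example.
 *
 *	int nid = cpu_to_node(cpu);
 *
 *	f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 *	if (!f)
 *		return -ENOMEM;
 */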
3644
Li Zefan0f24f122009-12-11 15:45:30 +08003645#ifdef CONFIG_TRACING
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003646void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
3647 gfp_t flags,
3648 int nodeid)
3649{
3650 return __cache_alloc_node(cachep, flags, nodeid,
3651 __builtin_return_address(0));
3652}
3653EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
3654#endif
3655
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003656static __always_inline void *
3657__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003658{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003659 struct kmem_cache *cachep;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003660 void *ret;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003661
3662 cachep = kmem_find_general_cachep(size, flags);
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003663 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3664 return cachep;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003665 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
3666
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003667 trace_kmalloc_node((unsigned long) caller, ret,
3668 size, cachep->buffer_size, flags, node);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003669
3670 return ret;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003671}
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003672
Li Zefan0bb38a52009-12-11 15:45:50 +08003673#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003674void *__kmalloc_node(size_t size, gfp_t flags, int node)
3675{
3676 return __do_kmalloc_node(size, flags, node,
3677 __builtin_return_address(0));
3678}
Christoph Hellwigdbe5e692006-09-25 23:31:36 -07003679EXPORT_SYMBOL(__kmalloc_node);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003680
3681void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003682 int node, unsigned long caller)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003683{
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003684 return __do_kmalloc_node(size, flags, node, (void *)caller);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003685}
3686EXPORT_SYMBOL(__kmalloc_node_track_caller);
3687#else
3688void *__kmalloc_node(size_t size, gfp_t flags, int node)
3689{
3690 return __do_kmalloc_node(size, flags, node, NULL);
3691}
3692EXPORT_SYMBOL(__kmalloc_node);
Li Zefan0bb38a52009-12-11 15:45:50 +08003693#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003694#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695
3696/**
Paul Drynoff800590f2006-06-23 02:03:48 -07003697 * __do_kmalloc - allocate memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698 * @size: how many bytes of memory are required.
Paul Drynoff800590f2006-06-23 02:03:48 -07003699 * @flags: the type of memory to allocate (see kmalloc).
Randy Dunlap911851e2006-03-22 00:08:14 -08003700 * @caller: function caller for debug tracking of the caller
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701 */
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003702static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3703 void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003705 struct kmem_cache *cachep;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003706 void *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003708 /* If you want to save a few bytes of .text space: replace
3709 * __ with kmem_.
3710 * Then kmalloc uses the uninlined functions instead of the inline
3711 * functions.
3712 */
3713 cachep = __find_general_cachep(size, flags);
Linus Torvaldsa5c96d82007-07-19 13:17:15 -07003714 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3715 return cachep;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003716 ret = __cache_alloc(cachep, flags, caller);
3717
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003718 trace_kmalloc((unsigned long) caller, ret,
3719 size, cachep->buffer_size, flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003720
3721 return ret;
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003722}
3723
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003724
Li Zefan0bb38a52009-12-11 15:45:50 +08003725#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003726void *__kmalloc(size_t size, gfp_t flags)
3727{
Al Viro871751e2006-03-25 03:06:39 -08003728 return __do_kmalloc(size, flags, __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729}
3730EXPORT_SYMBOL(__kmalloc);
3731
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003732void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003733{
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003734 return __do_kmalloc(size, flags, (void *)caller);
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003735}
3736EXPORT_SYMBOL(__kmalloc_track_caller);
Christoph Hellwig1d2c8ee2006-10-04 02:15:25 -07003737
3738#else
3739void *__kmalloc(size_t size, gfp_t flags)
3740{
3741 return __do_kmalloc(size, flags, NULL);
3742}
3743EXPORT_SYMBOL(__kmalloc);
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003744#endif
3745
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746/**
3747 * kmem_cache_free - Deallocate an object
3748 * @cachep: The cache the allocation was from.
3749 * @objp: The previously allocated object.
3750 *
3751 * Free an object which was previously allocated from this
3752 * cache.
3753 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003754void kmem_cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755{
3756 unsigned long flags;
3757
3758 local_irq_save(flags);
Ingo Molnar898552c2007-02-10 01:44:57 -08003759 debug_check_no_locks_freed(objp, obj_size(cachep));
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07003760 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3761 debug_check_no_obj_freed(objp, obj_size(cachep));
Ingo Molnar873623d2006-07-13 14:44:38 +02003762 __cache_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763 local_irq_restore(flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003764
Eduard - Gabriel Munteanuca2b84c2009-03-23 15:12:24 +02003765 trace_kmem_cache_free(_RET_IP_, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766}
3767EXPORT_SYMBOL(kmem_cache_free);
3768
3769/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 * kfree - free previously allocated memory
3771 * @objp: pointer returned by kmalloc.
3772 *
Pekka Enberg80e93ef2005-09-09 13:10:16 -07003773 * If @objp is NULL, no operation is performed.
3774 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775 * Don't free memory not originally allocated by kmalloc()
3776 * or you will run into trouble.
3777 */
3778void kfree(const void *objp)
3779{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003780 struct kmem_cache *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781 unsigned long flags;
3782
Pekka Enberg2121db72009-03-25 11:05:57 +02003783 trace_kfree(_RET_IP_, objp);
3784
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003785 if (unlikely(ZERO_OR_NULL_PTR(objp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 return;
3787 local_irq_save(flags);
3788 kfree_debugcheck(objp);
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08003789 c = virt_to_cache(objp);
Ingo Molnarf9b84042006-06-27 02:54:49 -07003790 debug_check_no_locks_freed(objp, obj_size(c));
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07003791 debug_check_no_obj_freed(objp, obj_size(c));
Ingo Molnar873623d2006-07-13 14:44:38 +02003792 __cache_free(c, (void *)objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 local_irq_restore(flags);
3794}
3795EXPORT_SYMBOL(kfree);
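/*
 * Example (editor's sketch, not part of the original file): kmalloc()/kfree()
 * are the generic front ends to the size-N caches set up at boot. "buf" and
 * "len" are hypothetical.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * kfree(NULL) is a no-op, so error paths need not test the pointer first.
 */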
3796
Pekka Enberg343e0d72006-02-01 03:05:50 -08003797unsigned int kmem_cache_size(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003799 return obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800}
3801EXPORT_SYMBOL(kmem_cache_size);
3802
Pekka Enberg343e0d72006-02-01 03:05:50 -08003803const char *kmem_cache_name(struct kmem_cache *cachep)
Arnaldo Carvalho de Melo19449722005-06-18 22:46:19 -07003804{
3805 return cachep->name;
3806}
3807EXPORT_SYMBOL_GPL(kmem_cache_name);
3808
Christoph Lametere498be72005-09-09 13:03:32 -07003809/*
Simon Arlott183ff222007-10-20 01:27:18 +02003810 * This initializes the kmem_list3 structures of a cache, or resizes its
 * per-node shared/alien arrays, for all online nodes.
Christoph Lametere498be72005-09-09 13:03:32 -07003811 */
Pekka Enberg83b519e2009-06-10 19:40:04 +03003812static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
Christoph Lametere498be72005-09-09 13:03:32 -07003813{
3814 int node;
3815 struct kmem_list3 *l3;
Christoph Lametercafeb022006-03-25 03:06:46 -08003816 struct array_cache *new_shared;
Paul Menage3395ee02006-12-06 20:32:16 -08003817 struct array_cache **new_alien = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003818
Mel Gorman9c09a952008-01-24 05:49:54 -08003819 for_each_online_node(node) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003820
Paul Menage3395ee02006-12-06 20:32:16 -08003821 if (use_alien_caches) {
Pekka Enberg83b519e2009-06-10 19:40:04 +03003822 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
Paul Menage3395ee02006-12-06 20:32:16 -08003823 if (!new_alien)
3824 goto fail;
3825 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003826
Eric Dumazet63109842007-05-06 14:49:28 -07003827 new_shared = NULL;
3828 if (cachep->shared) {
3829 new_shared = alloc_arraycache(node,
Christoph Lameter0718dc22006-03-25 03:06:47 -08003830 cachep->shared*cachep->batchcount,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003831 0xbaadf00d, gfp);
Eric Dumazet63109842007-05-06 14:49:28 -07003832 if (!new_shared) {
3833 free_alien_cache(new_alien);
3834 goto fail;
3835 }
Christoph Lameter0718dc22006-03-25 03:06:47 -08003836 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003837
Andrew Mortona737b3e2006-03-22 00:08:11 -08003838 l3 = cachep->nodelists[node];
3839 if (l3) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003840 struct array_cache *shared = l3->shared;
3841
Christoph Lametere498be72005-09-09 13:03:32 -07003842 spin_lock_irq(&l3->list_lock);
3843
Christoph Lametercafeb022006-03-25 03:06:46 -08003844 if (shared)
Christoph Lameter0718dc22006-03-25 03:06:47 -08003845 free_block(cachep, shared->entry,
3846 shared->avail, node);
Christoph Lametere498be72005-09-09 13:03:32 -07003847
Christoph Lametercafeb022006-03-25 03:06:46 -08003848 l3->shared = new_shared;
3849 if (!l3->alien) {
Christoph Lametere498be72005-09-09 13:03:32 -07003850 l3->alien = new_alien;
3851 new_alien = NULL;
3852 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003853 l3->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003854 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003855 spin_unlock_irq(&l3->list_lock);
Christoph Lametercafeb022006-03-25 03:06:46 -08003856 kfree(shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003857 free_alien_cache(new_alien);
3858 continue;
3859 }
Pekka Enberg83b519e2009-06-10 19:40:04 +03003860 l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
Christoph Lameter0718dc22006-03-25 03:06:47 -08003861 if (!l3) {
3862 free_alien_cache(new_alien);
3863 kfree(new_shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003864 goto fail;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003865 }
Christoph Lametere498be72005-09-09 13:03:32 -07003866
3867 kmem_list3_init(l3);
3868 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
Andrew Mortona737b3e2006-03-22 00:08:11 -08003869 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
Christoph Lametercafeb022006-03-25 03:06:46 -08003870 l3->shared = new_shared;
Christoph Lametere498be72005-09-09 13:03:32 -07003871 l3->alien = new_alien;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003872 l3->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003873 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003874 cachep->nodelists[node] = l3;
3875 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003876 return 0;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003877
Andrew Mortona737b3e2006-03-22 00:08:11 -08003878fail:
Christoph Lameter0718dc22006-03-25 03:06:47 -08003879 if (!cachep->next.next) {
3880 /* Cache is not active yet. Roll back what we did */
3881 node--;
3882 while (node >= 0) {
3883 if (cachep->nodelists[node]) {
3884 l3 = cachep->nodelists[node];
3885
3886 kfree(l3->shared);
3887 free_alien_cache(l3->alien);
3888 kfree(l3);
3889 cachep->nodelists[node] = NULL;
3890 }
3891 node--;
3892 }
3893 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003894 return -ENOMEM;
Christoph Lametere498be72005-09-09 13:03:32 -07003895}
3896
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897struct ccupdate_struct {
Pekka Enberg343e0d72006-02-01 03:05:50 -08003898 struct kmem_cache *cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 struct array_cache *new[NR_CPUS];
3900};
3901
3902static void do_ccupdate_local(void *info)
3903{
Andrew Mortona737b3e2006-03-22 00:08:11 -08003904 struct ccupdate_struct *new = info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 struct array_cache *old;
3906
3907 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003908 old = cpu_cache_get(new->cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003909
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3911 new->new[smp_processor_id()] = old;
3912}
3913
Ravikiran G Thirumalaib5d8ca72006-03-22 00:08:12 -08003914/* Always called with the cache_chain_mutex held */
Andrew Mortona737b3e2006-03-22 00:08:11 -08003915static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003916 int batchcount, int shared, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917{
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003918 struct ccupdate_struct *new;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003919 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920
Pekka Enberg83b519e2009-06-10 19:40:04 +03003921 new = kzalloc(sizeof(*new), gfp);
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003922 if (!new)
3923 return -ENOMEM;
3924
Christoph Lametere498be72005-09-09 13:03:32 -07003925 for_each_online_cpu(i) {
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003926 new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003927 batchcount, gfp);
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003928 if (!new->new[i]) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003929 for (i--; i >= 0; i--)
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003930 kfree(new->new[i]);
3931 kfree(new);
Christoph Lametere498be72005-09-09 13:03:32 -07003932 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 }
3934 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003935 new->cachep = cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936
Jens Axboe15c8b6c2008-05-09 09:39:44 +02003937 on_each_cpu(do_ccupdate_local, (void *)new, 1);
Christoph Lametere498be72005-09-09 13:03:32 -07003938
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 check_irq_on();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 cachep->batchcount = batchcount;
3941 cachep->limit = limit;
Christoph Lametere498be72005-09-09 13:03:32 -07003942 cachep->shared = shared;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943
Christoph Lametere498be72005-09-09 13:03:32 -07003944 for_each_online_cpu(i) {
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003945 struct array_cache *ccold = new->new[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946 if (!ccold)
3947 continue;
Christoph Lametere498be72005-09-09 13:03:32 -07003948 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
Christoph Lameterff694162005-09-22 21:44:02 -07003949 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
Christoph Lametere498be72005-09-09 13:03:32 -07003950 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 kfree(ccold);
3952 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003953 kfree(new);
Pekka Enberg83b519e2009-06-10 19:40:04 +03003954 return alloc_kmemlist(cachep, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955}
3956
Ravikiran G Thirumalaib5d8ca72006-03-22 00:08:12 -08003957/* Always called with the cache_chain_mutex held */
Pekka Enberg83b519e2009-06-10 19:40:04 +03003958static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959{
3960 int err;
3961 int limit, shared;
3962
Andrew Mortona737b3e2006-03-22 00:08:11 -08003963 /*
3964 * The head array serves three purposes:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 * - create a LIFO ordering, i.e. return objects that are cache-warm
3966 * - reduce the number of spinlock operations.
Andrew Mortona737b3e2006-03-22 00:08:11 -08003967 * - reduce the number of linked list operations on the slab and
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 * bufctl chains: array operations are cheaper.
3969 * The numbers are guesses; we should auto-tune them as described by
3970 * Bonwick.
3971 */
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003972 if (cachep->buffer_size > 131072)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 limit = 1;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003974 else if (cachep->buffer_size > PAGE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 limit = 8;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003976 else if (cachep->buffer_size > 1024)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 limit = 24;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003978 else if (cachep->buffer_size > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 limit = 54;
3980 else
3981 limit = 120;
3982
Andrew Mortona737b3e2006-03-22 00:08:11 -08003983 /*
3984 * CPU-bound tasks (e.g. network routing) can exhibit cpu-bound
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 * allocation behaviour: most allocs on one cpu, most free operations
3986 * on another cpu. For these cases, efficient object passing between
3987 * cpus is necessary. This is provided by a shared array. The array
3988 * replaces Bonwick's magazine layer.
3989 * On uniprocessor, it's functionally equivalent (but less efficient)
3990 * to a larger limit. Thus disabled by default.
3991 */
3992 shared = 0;
Eric Dumazet364fbb22007-05-06 14:49:27 -07003993 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 shared = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995
3996#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003997 /*
3998 * With debugging enabled, a large batchcount leads to excessively long
3999 * periods with local interrupts disabled. Limit the batchcount.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 */
4001 if (limit > 32)
4002 limit = 32;
4003#endif
Pekka Enberg83b519e2009-06-10 19:40:04 +03004004 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 if (err)
4006 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004007 cachep->name, -err);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07004008 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009}
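/*
 * Worked example (editor's illustration): for a cache of 512-byte objects on
 * a typical 4K-page SMP box the table above yields limit = 54, so
 * do_tune_cpucache() is called with batchcount = (54 + 1) / 2 = 27 and
 * shared = 8, giving each node a shared array of 8 * 27 = 216 entries.
 */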
4010
Christoph Lameter1b552532006-03-22 00:09:07 -08004011/*
4012 * Drain an array if it contains any elements, taking the l3 lock only if
Christoph Lameterb18e7e62006-03-22 00:09:07 -08004013 * necessary. Note that the l3 listlock also protects the array_cache
4014 * if drain_array() is used on the shared array.
Christoph Lameter1b552532006-03-22 00:09:07 -08004015 */
4016void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4017 struct array_cache *ac, int force, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018{
4019 int tofree;
4020
Christoph Lameter1b552532006-03-22 00:09:07 -08004021 if (!ac || !ac->avail)
4022 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 if (ac->touched && !force) {
4024 ac->touched = 0;
Christoph Lameterb18e7e62006-03-22 00:09:07 -08004025 } else {
Christoph Lameter1b552532006-03-22 00:09:07 -08004026 spin_lock_irq(&l3->list_lock);
Christoph Lameterb18e7e62006-03-22 00:09:07 -08004027 if (ac->avail) {
4028 tofree = force ? ac->avail : (ac->limit + 4) / 5;
4029 if (tofree > ac->avail)
4030 tofree = (ac->avail + 1) / 2;
4031 free_block(cachep, ac->entry, tofree, node);
4032 ac->avail -= tofree;
4033 memmove(ac->entry, &(ac->entry[tofree]),
4034 sizeof(void *) * ac->avail);
4035 }
Christoph Lameter1b552532006-03-22 00:09:07 -08004036 spin_unlock_irq(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037 }
4038}
4039
4040/**
4041 * cache_reap - Reclaim memory from caches.
Randy Dunlap05fb6bf2007-02-28 20:12:13 -08004042 * @w: work descriptor
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 *
4044 * Called from workqueue/eventd every few seconds.
4045 * Purpose:
4046 * - clear the per-cpu caches for this CPU.
4047 * - return freeable pages to the main free memory pool.
4048 *
Andrew Mortona737b3e2006-03-22 00:08:11 -08004049 * If we cannot acquire the cache chain mutex then just give up - we'll try
4050 * again on the next iteration.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004052static void cache_reap(struct work_struct *w)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053{
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004054 struct kmem_cache *searchp;
Christoph Lametere498be72005-09-09 13:03:32 -07004055 struct kmem_list3 *l3;
Christoph Lameteraab22072006-03-22 00:09:06 -08004056 int node = numa_node_id();
Jean Delvarebf6aede2009-04-02 16:56:54 -07004057 struct delayed_work *work = to_delayed_work(w);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004059 if (!mutex_trylock(&cache_chain_mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 /* Give up. Set up the next iteration. */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004061 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004063 list_for_each_entry(searchp, &cache_chain, next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064 check_irq_on();
4065
Christoph Lameter35386e32006-03-22 00:09:05 -08004066 /*
4067 * We only take the l3 lock if absolutely necessary and we
4068 * have established with reasonable certainty that
4069 * we can do some work once the lock is obtained.
4070 */
Christoph Lameteraab22072006-03-22 00:09:06 -08004071 l3 = searchp->nodelists[node];
Christoph Lameter35386e32006-03-22 00:09:05 -08004072
Christoph Lameter8fce4d82006-03-09 17:33:54 -08004073 reap_alien(searchp, l3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
Christoph Lameteraab22072006-03-22 00:09:06 -08004075 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
Christoph Lameter35386e32006-03-22 00:09:05 -08004077 /*
4078 * These are racy checks but it does not matter
4079 * if we skip one check or scan twice.
4080 */
Christoph Lametere498be72005-09-09 13:03:32 -07004081 if (time_after(l3->next_reap, jiffies))
Christoph Lameter35386e32006-03-22 00:09:05 -08004082 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083
Christoph Lametere498be72005-09-09 13:03:32 -07004084 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085
Christoph Lameteraab22072006-03-22 00:09:06 -08004086 drain_array(searchp, l3, l3->shared, 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087
Christoph Lametered11d9e2006-06-30 01:55:45 -07004088 if (l3->free_touched)
Christoph Lametere498be72005-09-09 13:03:32 -07004089 l3->free_touched = 0;
Christoph Lametered11d9e2006-06-30 01:55:45 -07004090 else {
4091 int freed;
4092
4093 freed = drain_freelist(searchp, l3, (l3->free_limit +
4094 5 * searchp->num - 1) / (5 * searchp->num));
4095 STATS_ADD_REAPED(searchp, freed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 }
Christoph Lameter35386e32006-03-22 00:09:05 -08004097next:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098 cond_resched();
4099 }
4100 check_irq_on();
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004101 mutex_unlock(&cache_chain_mutex);
Christoph Lameter8fce4d82006-03-09 17:33:54 -08004102 next_reap_node();
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004103out:
Andrew Mortona737b3e2006-03-22 00:08:11 -08004104 /* Set up the next iteration */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004105 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106}
4107
Linus Torvalds158a9622008-01-02 13:04:48 -08004108#ifdef CONFIG_SLABINFO
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109
Pekka Enberg85289f92006-01-08 01:00:36 -08004110static void print_slabinfo_header(struct seq_file *m)
4111{
4112 /*
4113 * Output format version, so at least we can change it
4114 * without _too_ many complaints.
4115 */
4116#if STATS
4117 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4118#else
4119 seq_puts(m, "slabinfo - version: 2.1\n");
4120#endif
4121 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
4122 "<objperslab> <pagesperslab>");
4123 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4124 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4125#if STATS
4126 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004127 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
Pekka Enberg85289f92006-01-08 01:00:36 -08004128 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4129#endif
4130 seq_putc(m, '\n');
4131}
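/*
 * Example (editor's illustration, figures made up): a /proc/slabinfo line in
 * the version 2.1 format emitted by s_show() below, without statistics:
 *
 *	dentry  10223  10290    192   21    1 : tunables  120   60    8 : slabdata    490    490      0
 */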
4132
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133static void *s_start(struct seq_file *m, loff_t *pos)
4134{
4135 loff_t n = *pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004137 mutex_lock(&cache_chain_mutex);
Pekka Enberg85289f92006-01-08 01:00:36 -08004138 if (!n)
4139 print_slabinfo_header(m);
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004140
4141 return seq_list_start(&cache_chain, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142}
4143
4144static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4145{
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004146 return seq_list_next(p, &cache_chain, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147}
4148
4149static void s_stop(struct seq_file *m, void *p)
4150{
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004151 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152}
4153
4154static int s_show(struct seq_file *m, void *p)
4155{
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004156 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004157 struct slab *slabp;
4158 unsigned long active_objs;
4159 unsigned long num_objs;
4160 unsigned long active_slabs = 0;
4161 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07004162 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 char *error = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07004164 int node;
4165 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167 active_objs = 0;
4168 num_slabs = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07004169 for_each_online_node(node) {
4170 l3 = cachep->nodelists[node];
4171 if (!l3)
4172 continue;
4173
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08004174 check_irq_on();
4175 spin_lock_irq(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07004176
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004177 list_for_each_entry(slabp, &l3->slabs_full, list) {
Christoph Lametere498be72005-09-09 13:03:32 -07004178 if (slabp->inuse != cachep->num && !error)
4179 error = "slabs_full accounting error";
4180 active_objs += cachep->num;
4181 active_slabs++;
4182 }
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004183 list_for_each_entry(slabp, &l3->slabs_partial, list) {
Christoph Lametere498be72005-09-09 13:03:32 -07004184 if (slabp->inuse == cachep->num && !error)
4185 error = "slabs_partial inuse accounting error";
4186 if (!slabp->inuse && !error)
4187 error = "slabs_partial/inuse accounting error";
4188 active_objs += slabp->inuse;
4189 active_slabs++;
4190 }
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004191 list_for_each_entry(slabp, &l3->slabs_free, list) {
Christoph Lametere498be72005-09-09 13:03:32 -07004192 if (slabp->inuse && !error)
4193 error = "slabs_free/inuse accounting error";
4194 num_slabs++;
4195 }
4196 free_objects += l3->free_objects;
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08004197 if (l3->shared)
4198 shared_avail += l3->shared->avail;
Christoph Lametere498be72005-09-09 13:03:32 -07004199
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08004200 spin_unlock_irq(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004202 num_slabs += active_slabs;
4203 num_objs = num_slabs * cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07004204 if (num_objs - active_objs != free_objects && !error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 error = "free_objects accounting error";
4206
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004207 name = cachep->name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 if (error)
4209 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4210
4211 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
Manfred Spraul3dafccf2006-02-01 03:05:42 -08004212 name, active_objs, num_objs, cachep->buffer_size,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004213 cachep->num, (1 << cachep->gfporder));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 seq_printf(m, " : tunables %4u %4u %4u",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004215 cachep->limit, cachep->batchcount, cachep->shared);
Christoph Lametere498be72005-09-09 13:03:32 -07004216 seq_printf(m, " : slabdata %6lu %6lu %6lu",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004217 active_slabs, num_slabs, shared_avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218#if STATS
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004219 { /* list3 stats */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 unsigned long high = cachep->high_mark;
4221 unsigned long allocs = cachep->num_allocations;
4222 unsigned long grown = cachep->grown;
4223 unsigned long reaped = cachep->reaped;
4224 unsigned long errors = cachep->errors;
4225 unsigned long max_freeable = cachep->max_freeable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 unsigned long node_allocs = cachep->node_allocs;
Christoph Lametere498be72005-09-09 13:03:32 -07004227 unsigned long node_frees = cachep->node_frees;
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004228 unsigned long overflows = cachep->node_overflow;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229
Christoph Lametere498be72005-09-09 13:03:32 -07004230 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004231 %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
Andrew Mortona737b3e2006-03-22 00:08:11 -08004232 reaped, errors, max_freeable, node_allocs,
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004233 node_frees, overflows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 }
4235 /* cpu stats */
4236 {
4237 unsigned long allochit = atomic_read(&cachep->allochit);
4238 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4239 unsigned long freehit = atomic_read(&cachep->freehit);
4240 unsigned long freemiss = atomic_read(&cachep->freemiss);
4241
4242 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004243 allochit, allocmiss, freehit, freemiss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 }
4245#endif
4246 seq_putc(m, '\n');
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 return 0;
4248}
4249
4250/*
4251 * slabinfo_op - iterator that generates /proc/slabinfo
4252 *
4253 * Output layout:
4254 * cache-name
4255 * num-active-objs
4256 * total-objs
4257 * object size
4258 * num-active-slabs
4259 * total-slabs
4260 * num-pages-per-slab
4261 * + further values on SMP and with statistics enabled
4262 */
4263
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04004264static const struct seq_operations slabinfo_op = {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004265 .start = s_start,
4266 .next = s_next,
4267 .stop = s_stop,
4268 .show = s_show,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269};
4270
4271#define MAX_SLABINFO_WRITE 128
4272/**
4273 * slabinfo_write - Tuning for the slab allocator
4274 * @file: unused
4275 * @buffer: user buffer
4276 * @count: data length
4277 * @ppos: unused
4278 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004279ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4280 size_t count, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004282 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 int limit, batchcount, shared, res;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004284 struct kmem_cache *cachep;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004285
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 if (count > MAX_SLABINFO_WRITE)
4287 return -EINVAL;
4288 if (copy_from_user(&kbuf, buffer, count))
4289 return -EFAULT;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004290 kbuf[MAX_SLABINFO_WRITE] = '\0';
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291
4292 tmp = strchr(kbuf, ' ');
4293 if (!tmp)
4294 return -EINVAL;
4295 *tmp = '\0';
4296 tmp++;
4297 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4298 return -EINVAL;
4299
4300 /* Find the cache in the chain of caches. */
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004301 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 res = -EINVAL;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004303 list_for_each_entry(cachep, &cache_chain, next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 if (!strcmp(cachep->name, kbuf)) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08004305 if (limit < 1 || batchcount < 1 ||
4306 batchcount > limit || shared < 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07004307 res = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07004309 res = do_tune_cpucache(cachep, limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03004310 batchcount, shared,
4311 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312 }
4313 break;
4314 }
4315 }
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004316 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 if (res >= 0)
4318 res = count;
4319 return res;
4320}
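/*
 * Example (editor's sketch; the cache name and numbers are illustrative):
 * the tunables of a cache can be adjusted from user space by writing
 * "name limit batchcount shared" to /proc/slabinfo, e.g.
 *
 *	echo "dentry 256 128 8" > /proc/slabinfo
 *
 * subject to the checks above (limit and batchcount at least 1, batchcount
 * not larger than limit, shared non-negative). The file is created with
 * mode S_IWUSR|S_IRUGO in slab_proc_init() below, so only root may write it.
 */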
Al Viro871751e2006-03-25 03:06:39 -08004321
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04004322static int slabinfo_open(struct inode *inode, struct file *file)
4323{
4324 return seq_open(file, &slabinfo_op);
4325}
4326
4327static const struct file_operations proc_slabinfo_operations = {
4328 .open = slabinfo_open,
4329 .read = seq_read,
4330 .write = slabinfo_write,
4331 .llseek = seq_lseek,
4332 .release = seq_release,
4333};
4334
Al Viro871751e2006-03-25 03:06:39 -08004335#ifdef CONFIG_DEBUG_SLAB_LEAK
4336
4337static void *leaks_start(struct seq_file *m, loff_t *pos)
4338{
Al Viro871751e2006-03-25 03:06:39 -08004339 mutex_lock(&cache_chain_mutex);
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004340 return seq_list_start(&cache_chain, *pos);
Al Viro871751e2006-03-25 03:06:39 -08004341}
4342
4343static inline int add_caller(unsigned long *n, unsigned long v)
4344{
4345 unsigned long *p;
4346 int l;
4347 if (!v)
4348 return 1;
4349 l = n[1];
4350 p = n + 2;
4351 while (l) {
4352 int i = l/2;
4353 unsigned long *q = p + 2 * i;
4354 if (*q == v) {
4355 q[1]++;
4356 return 1;
4357 }
4358 if (*q > v) {
4359 l = i;
4360 } else {
4361 p = q + 2;
4362 l -= i + 1;
4363 }
4364 }
4365 if (++n[1] == n[0])
4366 return 0;
4367 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4368 p[0] = v;
4369 p[1] = 1;
4370 return 1;
4371}
4372
4373static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4374{
4375 void *p;
4376 int i;
4377 if (n[0] == n[1])
4378 return;
4379 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4380 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4381 continue;
4382 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4383 return;
4384 }
4385}
4386
4387static void show_symbol(struct seq_file *m, unsigned long address)
4388{
4389#ifdef CONFIG_KALLSYMS
Al Viro871751e2006-03-25 03:06:39 -08004390 unsigned long offset, size;
Tejun Heo9281ace2007-07-17 04:03:51 -07004391 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
Al Viro871751e2006-03-25 03:06:39 -08004392
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004393 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
Al Viro871751e2006-03-25 03:06:39 -08004394 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004395 if (modname[0])
Al Viro871751e2006-03-25 03:06:39 -08004396 seq_printf(m, " [%s]", modname);
4397 return;
4398 }
4399#endif
4400 seq_printf(m, "%p", (void *)address);
4401}
4402
4403static int leaks_show(struct seq_file *m, void *p)
4404{
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004405 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
Al Viro871751e2006-03-25 03:06:39 -08004406 struct slab *slabp;
4407 struct kmem_list3 *l3;
4408 const char *name;
4409 unsigned long *n = m->private;
4410 int node;
4411 int i;
4412
4413 if (!(cachep->flags & SLAB_STORE_USER))
4414 return 0;
4415 if (!(cachep->flags & SLAB_RED_ZONE))
4416 return 0;
4417
4418 /* OK, we can do it */
4419
4420 n[1] = 0;
4421
4422 for_each_online_node(node) {
4423 l3 = cachep->nodelists[node];
4424 if (!l3)
4425 continue;
4426
4427 check_irq_on();
4428 spin_lock_irq(&l3->list_lock);
4429
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004430 list_for_each_entry(slabp, &l3->slabs_full, list)
Al Viro871751e2006-03-25 03:06:39 -08004431 handle_slab(n, cachep, slabp);
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004432 list_for_each_entry(slabp, &l3->slabs_partial, list)
Al Viro871751e2006-03-25 03:06:39 -08004433 handle_slab(n, cachep, slabp);
Al Viro871751e2006-03-25 03:06:39 -08004434 spin_unlock_irq(&l3->list_lock);
4435 }
4436 name = cachep->name;
4437 if (n[0] == n[1]) {
4438 /* Increase the buffer size */
4439 mutex_unlock(&cache_chain_mutex);
4440 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4441 if (!m->private) {
4442 /* Too bad, we are really out of memory */
4443 m->private = n;
4444 mutex_lock(&cache_chain_mutex);
4445 return -ENOMEM;
4446 }
4447 *(unsigned long *)m->private = n[0] * 2;
4448 kfree(n);
4449 mutex_lock(&cache_chain_mutex);
4450 /* Now make sure this entry will be retried */
4451 m->count = m->size;
4452 return 0;
4453 }
4454 for (i = 0; i < n[1]; i++) {
4455 seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4456 show_symbol(m, n[2*i+2]);
4457 seq_putc(m, '\n');
4458 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07004459
Al Viro871751e2006-03-25 03:06:39 -08004460 return 0;
4461}
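/*
 * Example (editor's illustration, values made up): with CONFIG_DEBUG_SLAB_LEAK
 * and a cache using SLAB_STORE_USER, each line of /proc/slab_allocators pairs
 * a cache name with an allocation call site and the number of live objects
 * allocated there, as printed by the loop above:
 *
 *	size-512: 112 load_elf_binary+0x1ac/0x4e0
 */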
4462
Alexey Dobriyana0ec95a82008-10-06 00:59:10 +04004463static const struct seq_operations slabstats_op = {
Al Viro871751e2006-03-25 03:06:39 -08004464 .start = leaks_start,
4465 .next = s_next,
4466 .stop = s_stop,
4467 .show = leaks_show,
4468};
Alexey Dobriyana0ec95a82008-10-06 00:59:10 +04004469
4470static int slabstats_open(struct inode *inode, struct file *file)
4471{
4472 unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4473 int ret = -ENOMEM;
4474 if (n) {
4475 ret = seq_open(file, &slabstats_op);
4476 if (!ret) {
4477 struct seq_file *m = file->private_data;
4478 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4479 m->private = n;
4480 n = NULL;
4481 }
4482 kfree(n);
4483 }
4484 return ret;
4485}
4486
4487static const struct file_operations proc_slabstats_operations = {
4488 .open = slabstats_open,
4489 .read = seq_read,
4490 .llseek = seq_lseek,
4491 .release = seq_release_private,
4492};
Al Viro871751e2006-03-25 03:06:39 -08004493#endif
Alexey Dobriyana0ec95a82008-10-06 00:59:10 +04004494
4495static int __init slab_proc_init(void)
4496{
Alexey Dobriyan7b3c3a52008-10-06 02:42:17 +04004497 proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
Alexey Dobriyana0ec95a82008-10-06 00:59:10 +04004498#ifdef CONFIG_DEBUG_SLAB_LEAK
4499 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4500#endif
4501 return 0;
4502}
4503module_init(slab_proc_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504#endif
4505
Manfred Spraul00e145b2005-09-03 15:55:07 -07004506/**
4507 * ksize - get the actual amount of memory allocated for a given object
4508 * @objp: Pointer to the object
4509 *
4510 * kmalloc may internally round up allocations and return more memory
4511 * than requested. ksize() can be used to determine the actual amount of
4512 * memory allocated. The caller may use this additional memory, even though
4513 * a smaller amount of memory was initially specified with the kmalloc call.
4514 * The caller must guarantee that objp points to a valid object previously
4515 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4516 * must not be freed during the duration of the call.
4517 */
Pekka Enbergfd76bab2007-05-06 14:48:40 -07004518size_t ksize(const void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519{
Christoph Lameteref8b4522007-10-16 01:24:46 -07004520 BUG_ON(!objp);
4521 if (unlikely(objp == ZERO_SIZE_PTR))
Manfred Spraul00e145b2005-09-03 15:55:07 -07004522 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08004524 return obj_size(virt_to_cache(objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02004526EXPORT_SYMBOL(ksize);
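/*
 * Example (editor's sketch, not part of the original file): kmalloc() rounds
 * a request up to the nearest general cache size, and ksize() reports the
 * amount actually reserved, all of which the caller may use.
 *
 *	p = kmalloc(100, GFP_KERNEL);
 *	n = ksize(p);
 *	... n is typically 128 here, so 28 extra bytes are usable ...
 *	kfree(p);
 */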