/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
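/*
 * Quick orientation for readers (illustrative, not part of the original
 * header): a typical user of this allocator creates a cache once and then
 * allocates and frees objects from it, roughly:
 *
 *	cachep = kmem_cache_create("my_objs", sizeof(struct my_obj),
 *				   0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(cachep, obj);
 *
 * The exact prototypes (including the constructor argument) are declared in
 * <linux/slab.h> and have changed across kernel versions; see that header
 * for the authoritative signatures.
 */
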
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab, as linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
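/*
 * Illustrative note (not from the original source): the kmem_bufctl_t array
 * that follows struct slab acts as an index-linked free list. For a slab of
 * four objects with objects 1 and 3 free, slabp->free == 1, bufctl[1] == 3
 * and bufctl[3] == BUFCTL_END; an allocation pops index 1 and advances
 * slabp->free to 3, and a free pushes the object's index back onto the head.
 * BUFCTL_FREE and BUFCTL_ACTIVE are only used by the debug code to mark
 * entries.
 */
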
/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};
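/*
 * Illustrative note (not from the original source): entry[] is used as a
 * simple LIFO stack of object pointers. A free pushes at entry[avail++] and
 * the next allocation on the same cpu pops entry[--avail], so the object
 * handed out is the one most recently freed and therefore most likely still
 * cache-hot. Only when avail hits limit (or drops to zero) does the slow
 * path move batchcount objects to or from the slab lists.
 */
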
/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}
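/*
 * Illustrative note (not from the original source): assuming a
 * kmalloc_sizes.h table that begins 32, 64, 96, 128, ..., index_of(80)
 * unrolls to "80 <= 32? no; 80 <= 64? no; 80 <= 96? yes -> return 2", and
 * because the argument is a compile-time constant the whole chain folds to
 * the literal 2. A non-constant (or oversized) argument reaches __bad_size(),
 * which has no definition and therefore fails at link time.
 */
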
static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(struct kmem_cache *, void *);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *)(objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->buffer_size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *)(objp + cachep->buffer_size -
				      sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif
572/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 * Do not go above this order unless 0 objects fit into the slab.
574 */
575#define BREAK_GFP_ORDER_HI 1
576#define BREAK_GFP_ORDER_LO 0
577static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
578
Andrew Mortona737b3e2006-03-22 00:08:11 -0800579/*
580 * Functions for storing/retrieving the cachep and or slab from the page
581 * allocator. These are used to find the slab an obj belongs to. With kfree(),
582 * these are used to find the cache which an obj belongs to.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583 */
Pekka Enberg065d41c2005-11-13 16:06:46 -0800584static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
585{
586 page->lru.next = (struct list_head *)cache;
587}
588
589static inline struct kmem_cache *page_get_cache(struct page *page)
590{
Christoph Lameterd85f3382007-05-06 14:49:39 -0700591 page = compound_head(page);
Pekka Enbergddc2e812006-06-23 02:03:40 -0700592 BUG_ON(!PageSlab(page));
Pekka Enberg065d41c2005-11-13 16:06:46 -0800593 return (struct kmem_cache *)page->lru.next;
594}
595
596static inline void page_set_slab(struct page *page, struct slab *slab)
597{
598 page->lru.prev = (struct list_head *)slab;
599}
600
601static inline struct slab *page_get_slab(struct page *page)
602{
Pekka Enbergddc2e812006-06-23 02:03:40 -0700603 BUG_ON(!PageSlab(page));
Pekka Enberg065d41c2005-11-13 16:06:46 -0800604 return (struct slab *)page->lru.prev;
605}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606
Pekka Enberg6ed5eb22006-02-01 03:05:49 -0800607static inline struct kmem_cache *virt_to_cache(const void *obj)
608{
Christoph Lameterb49af682007-05-06 14:49:41 -0700609 struct page *page = virt_to_head_page(obj);
Pekka Enberg6ed5eb22006-02-01 03:05:49 -0800610 return page_get_cache(page);
611}
612
613static inline struct slab *virt_to_slab(const void *obj)
614{
Christoph Lameterb49af682007-05-06 14:49:41 -0700615 struct page *page = virt_to_head_page(obj);
Pekka Enberg6ed5eb22006-02-01 03:05:49 -0800616 return page_get_slab(page);
617}
618
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800619static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
620 unsigned int idx)
621{
622 return slab->s_mem + cache->buffer_size * idx;
623}
624
Eric Dumazet6a2d7a92006-12-13 00:34:27 -0800625/*
626 * We want to avoid an expensive divide : (offset / cache->buffer_size)
627 * Using the fact that buffer_size is a constant for a particular cache,
628 * we can replace (offset / cache->buffer_size) by
629 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
630 */
631static inline unsigned int obj_to_index(const struct kmem_cache *cache,
632 const struct slab *slab, void *obj)
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800633{
Eric Dumazet6a2d7a92006-12-13 00:34:27 -0800634 u32 offset = (obj - slab->s_mem);
635 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
Pekka Enberg8fea4e92006-03-22 00:08:10 -0800636}
637
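/*
 * Illustrative note (not from the original source): reciprocal_divide()
 * computes n / d as a multiply-and-shift ((u64)n * R >> 32) with R
 * precomputed from d by reciprocal_value(), so for a cache whose buffer_size
 * is 256 an object starting 2560 bytes past s_mem maps to index
 * 2560 / 256 = 10 without a hardware divide. The precomputed
 * reciprocal_buffer_size is filled in when the cache is set up.
 */
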
/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)
{
	int q;
	struct cache_sizes *s = malloc_sizes;

	while (s->cs_size != ULONG_MAX) {
		for_each_node(q) {
			struct array_cache **alc;
			int r;
			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
			if (!l3 || OFF_SLAB(s->cs_cachep))
				continue;
			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
			alc = l3->alien;
			/*
			 * FIXME: This check for BAD_ALIEN_MAGIC
			 * should go away when common slab code is taught to
			 * work even without alien caches.
			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
			 * for alloc_alien_cache,
			 */
			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
				continue;
			for_each_node(r) {
				if (alc[r])
					lockdep_set_class(&alc[r]->lock,
					     &on_slab_alc_key);
			}
		}
		s++;
	}
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/*
 * 1. Guard access to the cache-chain.
 * 2. Protect sanity of cpu_online_map against cpu hotplug events
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct delayed_work, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	if (!size)
		return ZERO_SIZE_PTR;

	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
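/*
 * Illustrative note (not from the original source): the lookup above is a
 * linear scan of malloc_sizes[]. Assuming the usual table (32, 64, 96,
 * 128, ...), a request like kmem_find_general_cachep(100, GFP_KERNEL) walks
 * 32, 64, 96, 128 and stops at the first cs_size >= 100, returning the
 * "size-128" cache. The ULONG_MAX terminator guarantees the loop always
 * stops, and its NULL cs_cachep is what callers see for requests too large
 * for any general cache.
 */
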
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
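/*
 * Worked example (illustrative, not from the original file): with on-slab
 * management, gfporder = 0 (a 4096-byte slab), buffer_size = 256, align = 4,
 * a 32-byte struct slab and a 4-byte kmem_bufctl_t:
 *	initial guess: (4096 - 32) / (256 + 4) = 15 objects
 *	slab_mgmt_size(15, 4) = ALIGN(32 + 15*4, 4) = 92
 *	92 + 15*256 = 3932 <= 4096, so the guess stands
 *	*num = 15, *left_over = 4096 - 3840 - 92 = 164
 * The struct slab size and alignment vary by configuration, so the exact
 * numbers differ, but the shape of the calculation is the same.
 */
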
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int numa_platform __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}
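/*
 * Illustrative example (not from the original source): if "from" holds 7
 * pointers, max is 16 and "to" has room for 5 more (limit 12, avail 7), then
 * nr = min(min(7, 16), 12 - 7) = 5, so the 5 most recently freed pointers
 * (the tail of from->entry[]) are copied to the end of to->entry[], leaving
 * from->avail == 2 and to->avail == 12.
 */
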
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
	    kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_node_id();

	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif
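/*
 * Illustrative walk-through (not from the original file): assume a cpu on
 * node 0 frees an object whose slab lives on node 1. cache_free_alien()
 * sees slabp->nodeid != numa_node_id(), so instead of touching node 1's
 * lists directly it parks the pointer in node 0's l3->alien[1]. Only when
 * that alien array fills up, or when cache_reap()/reap_alien() next visits
 * it, is the batch pushed back to node 1 via __drain_alien_cache(), first
 * into node 1's shared array and the remainder through free_block().
 */
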
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001160static int __cpuinit cpuup_callback(struct notifier_block *nfb,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001161 unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162{
1163 long cpu = (long)hcpu;
Pekka Enberg343e0d72006-02-01 03:05:50 -08001164 struct kmem_cache *cachep;
Christoph Lametere498be72005-09-09 13:03:32 -07001165 struct kmem_list3 *l3 = NULL;
1166 int node = cpu_to_node(cpu);
David Howellsea02e3d2007-07-19 01:49:09 -07001167 const int memsize = sizeof(struct kmem_list3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
1169 switch (action) {
Heiko Carstens38c3bd92007-05-09 02:34:05 -07001170 case CPU_LOCK_ACQUIRE:
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001171 mutex_lock(&cache_chain_mutex);
Heiko Carstens38c3bd92007-05-09 02:34:05 -07001172 break;
1173 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001174 case CPU_UP_PREPARE_FROZEN:
Andrew Mortona737b3e2006-03-22 00:08:11 -08001175 /*
1176 * We need to do this right in the beginning since
Christoph Lametere498be72005-09-09 13:03:32 -07001177 * alloc_arraycache's are going to use this list.
1178 * kmalloc_node allows us to add the slab to the right
1179 * kmem_list3 and not this cpu's kmem_list3
1180 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
Christoph Lametere498be72005-09-09 13:03:32 -07001182 list_for_each_entry(cachep, &cache_chain, next) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08001183 /*
1184 * Set up the size64 kmemlist for cpu before we can
Christoph Lametere498be72005-09-09 13:03:32 -07001185 * begin anything. Make sure some other cpu on this
1186 * node has not already allocated this
1187 */
1188 if (!cachep->nodelists[node]) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08001189 l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1190 if (!l3)
Christoph Lametere498be72005-09-09 13:03:32 -07001191 goto bad;
1192 kmem_list3_init(l3);
1193 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001194 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
Christoph Lametere498be72005-09-09 13:03:32 -07001195
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001196 /*
1197 * The l3s don't come and go as CPUs come and
1198 * go. cache_chain_mutex is sufficient
1199 * protection here.
1200 */
Christoph Lametere498be72005-09-09 13:03:32 -07001201 cachep->nodelists[node] = l3;
1202 }
1203
1204 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1205 cachep->nodelists[node]->free_limit =
Andrew Mortona737b3e2006-03-22 00:08:11 -08001206 (1 + nr_cpus_node(node)) *
1207 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07001208 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1209 }
1210
Andrew Mortona737b3e2006-03-22 00:08:11 -08001211 /*
1212 * Now we can go ahead with allocating the shared arrays and
1213 * array caches
1214 */
Christoph Lametere498be72005-09-09 13:03:32 -07001215 list_for_each_entry(cachep, &cache_chain, next) {
Tobias Klausercd105df2006-01-08 01:00:59 -08001216 struct array_cache *nc;
Eric Dumazet63109842007-05-06 14:49:28 -07001217 struct array_cache *shared = NULL;
Paul Menage3395ee02006-12-06 20:32:16 -08001218 struct array_cache **alien = NULL;
Tobias Klausercd105df2006-01-08 01:00:59 -08001219
Christoph Lametere498be72005-09-09 13:03:32 -07001220 nc = alloc_arraycache(node, cachep->limit,
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001221 cachep->batchcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 if (!nc)
1223 goto bad;
Eric Dumazet63109842007-05-06 14:49:28 -07001224 if (cachep->shared) {
1225 shared = alloc_arraycache(node,
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001226 cachep->shared * cachep->batchcount,
1227 0xbaadf00d);
Eric Dumazet63109842007-05-06 14:49:28 -07001228 if (!shared)
1229 goto bad;
1230 }
Paul Menage3395ee02006-12-06 20:32:16 -08001231 if (use_alien_caches) {
1232 alien = alloc_alien_cache(node, cachep->limit);
1233 if (!alien)
1234 goto bad;
1235 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 cachep->array[cpu] = nc;
Christoph Lametere498be72005-09-09 13:03:32 -07001237 l3 = cachep->nodelists[node];
1238 BUG_ON(!l3);
Christoph Lametere498be72005-09-09 13:03:32 -07001239
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001240 spin_lock_irq(&l3->list_lock);
1241 if (!l3->shared) {
1242 /*
1243 * We are serialised from CPU_DEAD or
1244 * CPU_UP_CANCELLED by the cpucontrol lock
1245 */
1246 l3->shared = shared;
1247 shared = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07001248 }
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001249#ifdef CONFIG_NUMA
1250 if (!l3->alien) {
1251 l3->alien = alien;
1252 alien = NULL;
1253 }
1254#endif
1255 spin_unlock_irq(&l3->list_lock);
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001256 kfree(shared);
1257 free_alien_cache(alien);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 break;
1260 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001261 case CPU_ONLINE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 start_cpu_timer(cpu);
1263 break;
1264#ifdef CONFIG_HOTPLUG_CPU
Christoph Lameter5830c592007-05-09 02:34:22 -07001265 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001266 case CPU_DOWN_PREPARE_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001267 /*
1268 * Shutdown cache reaper. Note that the cache_chain_mutex is
1269 * held so that if cache_reap() is invoked it cannot do
1270 * anything expensive but will only modify reap_work
1271 * and reschedule the timer.
1272 */
1273 cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
 1274 /* Now the cache_reaper is guaranteed not to be running. */
1275 per_cpu(reap_work, cpu).work.func = NULL;
1276 break;
1277 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001278 case CPU_DOWN_FAILED_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001279 start_cpu_timer(cpu);
1280 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001282 case CPU_DEAD_FROZEN:
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001283 /*
1284 * Even if all the cpus of a node are down, we don't free the
 1285 * kmem_list3 of any cache. This is to avoid a race between
1286 * cpu_down, and a kmalloc allocation from another cpu for
1287 * memory from the node of the cpu going down. The list3
1288 * structure is usually allocated from kmem_cache_create() and
1289 * gets destroyed at kmem_cache_destroy().
1290 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 /* fall thru */
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08001292#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001294 case CPU_UP_CANCELED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 list_for_each_entry(cachep, &cache_chain, next) {
1296 struct array_cache *nc;
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001297 struct array_cache *shared;
1298 struct array_cache **alien;
Christoph Lametere498be72005-09-09 13:03:32 -07001299 cpumask_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300
Christoph Lametere498be72005-09-09 13:03:32 -07001301 mask = node_to_cpumask(node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 /* cpu is dead; no one can alloc from it. */
1303 nc = cachep->array[cpu];
1304 cachep->array[cpu] = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07001305 l3 = cachep->nodelists[node];
1306
1307 if (!l3)
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001308 goto free_array_cache;
Christoph Lametere498be72005-09-09 13:03:32 -07001309
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08001310 spin_lock_irq(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07001311
1312 /* Free limit for this kmem_list3 */
1313 l3->free_limit -= cachep->batchcount;
1314 if (nc)
Christoph Lameterff694162005-09-22 21:44:02 -07001315 free_block(cachep, nc->entry, nc->avail, node);
Christoph Lametere498be72005-09-09 13:03:32 -07001316
1317 if (!cpus_empty(mask)) {
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08001318 spin_unlock_irq(&l3->list_lock);
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001319 goto free_array_cache;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001320 }
Christoph Lametere498be72005-09-09 13:03:32 -07001321
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001322 shared = l3->shared;
1323 if (shared) {
Eric Dumazet63109842007-05-06 14:49:28 -07001324 free_block(cachep, shared->entry,
1325 shared->avail, node);
Christoph Lametere498be72005-09-09 13:03:32 -07001326 l3->shared = NULL;
1327 }
Christoph Lametere498be72005-09-09 13:03:32 -07001328
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001329 alien = l3->alien;
1330 l3->alien = NULL;
1331
1332 spin_unlock_irq(&l3->list_lock);
1333
1334 kfree(shared);
1335 if (alien) {
1336 drain_alien_cache(cachep, alien);
1337 free_alien_cache(alien);
Christoph Lametere498be72005-09-09 13:03:32 -07001338 }
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001339free_array_cache:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 kfree(nc);
1341 }
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001342 /*
1343 * In the previous loop, all the objects were freed to
 1344 * the respective cache's slabs; now we can go ahead and
1345 * shrink each nodelist to its limit.
1346 */
1347 list_for_each_entry(cachep, &cache_chain, next) {
1348 l3 = cachep->nodelists[node];
1349 if (!l3)
1350 continue;
Christoph Lametered11d9e2006-06-30 01:55:45 -07001351 drain_freelist(cachep, l3, l3->free_objects);
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001352 }
Heiko Carstens38c3bd92007-05-09 02:34:05 -07001353 break;
1354 case CPU_LOCK_RELEASE:
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001355 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 }
1358 return NOTIFY_OK;
Andrew Mortona737b3e2006-03-22 00:08:11 -08001359bad:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 return NOTIFY_BAD;
1361}
1362
Chandra Seetharaman74b85f32006-06-27 02:54:09 -07001363static struct notifier_block __cpuinitdata cpucache_notifier = {
1364 &cpuup_callback, NULL, 0
1365};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
Christoph Lametere498be72005-09-09 13:03:32 -07001367/*
1368 * swap the static kmem_list3 with kmalloced memory
1369 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001370static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1371 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07001372{
1373 struct kmem_list3 *ptr;
1374
Christoph Lametere498be72005-09-09 13:03:32 -07001375 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1376 BUG_ON(!ptr);
1377
1378 local_irq_disable();
1379 memcpy(ptr, list, sizeof(struct kmem_list3));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001380 /*
1381 * Do not assume that spinlocks can be initialized via memcpy:
1382 */
1383 spin_lock_init(&ptr->list_lock);
1384
Christoph Lametere498be72005-09-09 13:03:32 -07001385 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1386 cachep->nodelists[nodeid] = ptr;
1387 local_irq_enable();
1388}
1389
Andrew Mortona737b3e2006-03-22 00:08:11 -08001390/*
 1391 * Initialisation. Called after the page allocator has been initialised and
1392 * before smp_init().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 */
1394void __init kmem_cache_init(void)
1395{
1396 size_t left_over;
1397 struct cache_sizes *sizes;
1398 struct cache_names *names;
Christoph Lametere498be72005-09-09 13:03:32 -07001399 int i;
Jack Steiner07ed76b2006-03-07 21:55:46 -08001400 int order;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001401 int node;
Christoph Lametere498be72005-09-09 13:03:32 -07001402
Siddha, Suresh B1807a1a2007-08-22 14:01:49 -07001403 if (num_possible_nodes() == 1) {
Siddha, Suresh B62918a02007-05-02 19:27:18 +02001404 use_alien_caches = 0;
Siddha, Suresh B1807a1a2007-08-22 14:01:49 -07001405 numa_platform = 0;
1406 }
Siddha, Suresh B62918a02007-05-02 19:27:18 +02001407
Christoph Lametere498be72005-09-09 13:03:32 -07001408 for (i = 0; i < NUM_INIT_LISTS; i++) {
1409 kmem_list3_init(&initkmem_list3[i]);
1410 if (i < MAX_NUMNODES)
1411 cache_cache.nodelists[i] = NULL;
1412 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
1414 /*
1415 * Fragmentation resistance on low memory - only use bigger
1416 * page orders on machines with more than 32MB of memory.
1417 */
1418 if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1419 slab_break_gfp_order = BREAK_GFP_ORDER_HI;
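	/*
	 * Example: with 4 KB pages (PAGE_SHIFT == 12), (32 << 20) >> PAGE_SHIFT
	 * is 8192 pages, so the higher break order is only used on machines
	 * with more than 32 MB of RAM.
	 */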
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 /* Bootstrap is tricky, because several objects are allocated
1422 * from caches that do not exist yet:
Andrew Mortona737b3e2006-03-22 00:08:11 -08001423 * 1) initialize the cache_cache cache: it contains the struct
1424 * kmem_cache structures of all caches, except cache_cache itself:
1425 * cache_cache is statically allocated.
Christoph Lametere498be72005-09-09 13:03:32 -07001426 * Initially an __init data area is used for the head array and the
 1427 * kmem_list3 structures; it's replaced with a kmalloc allocated
1428 * array at the end of the bootstrap.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 * 2) Create the first kmalloc cache.
Pekka Enberg343e0d72006-02-01 03:05:50 -08001430 * The struct kmem_cache for the new cache is allocated normally.
Christoph Lametere498be72005-09-09 13:03:32 -07001431 * An __init data area is used for the head array.
1432 * 3) Create the remaining kmalloc caches, with minimally sized
1433 * head arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 * 4) Replace the __init data head arrays for cache_cache and the first
1435 * kmalloc cache with kmalloc allocated arrays.
Christoph Lametere498be72005-09-09 13:03:32 -07001436 * 5) Replace the __init data for kmem_list3 for cache_cache and
 1437 * the other caches with kmalloc allocated memory.
1438 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 */
1440
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001441 node = numa_node_id();
1442
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 /* 1) create the cache_cache */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 INIT_LIST_HEAD(&cache_chain);
1445 list_add(&cache_cache.next, &cache_chain);
1446 cache_cache.colour_off = cache_line_size();
1447 cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001448 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Eric Dumazet8da34302007-05-06 14:49:29 -07001450 /*
1451 * struct kmem_cache size depends on nr_node_ids, which
1452 * can be less than MAX_NUMNODES.
1453 */
1454 cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1455 nr_node_ids * sizeof(struct kmem_list3 *);
1456#if DEBUG
1457 cache_cache.obj_size = cache_cache.buffer_size;
1458#endif
Andrew Mortona737b3e2006-03-22 00:08:11 -08001459 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1460 cache_line_size());
Eric Dumazet6a2d7a92006-12-13 00:34:27 -08001461 cache_cache.reciprocal_buffer_size =
1462 reciprocal_value(cache_cache.buffer_size);
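	/*
	 * reciprocal_value() precomputes a multiplicative inverse of
	 * buffer_size so that later divisions by the object size (for
	 * example when mapping an object pointer back to its index within
	 * a slab) can be done with a multiply and shift instead of a
	 * hardware divide.
	 */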
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Jack Steiner07ed76b2006-03-07 21:55:46 -08001464 for (order = 0; order < MAX_ORDER; order++) {
1465 cache_estimate(order, cache_cache.buffer_size,
1466 cache_line_size(), 0, &left_over, &cache_cache.num);
1467 if (cache_cache.num)
1468 break;
1469 }
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02001470 BUG_ON(!cache_cache.num);
Jack Steiner07ed76b2006-03-07 21:55:46 -08001471 cache_cache.gfporder = order;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001472 cache_cache.colour = left_over / cache_cache.colour_off;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001473 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1474 sizeof(struct slab), cache_line_size());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
1476 /* 2+3) create the kmalloc caches */
1477 sizes = malloc_sizes;
1478 names = cache_names;
1479
Andrew Mortona737b3e2006-03-22 00:08:11 -08001480 /*
1481 * Initialize the caches that provide memory for the array cache and the
1482 * kmem_list3 structures first. Without this, further allocations will
 1483 * BUG().
Christoph Lametere498be72005-09-09 13:03:32 -07001484 */
1485
1486 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001487 sizes[INDEX_AC].cs_size,
1488 ARCH_KMALLOC_MINALIGN,
1489 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001490 NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07001491
Andrew Mortona737b3e2006-03-22 00:08:11 -08001492 if (INDEX_AC != INDEX_L3) {
Christoph Lametere498be72005-09-09 13:03:32 -07001493 sizes[INDEX_L3].cs_cachep =
Andrew Mortona737b3e2006-03-22 00:08:11 -08001494 kmem_cache_create(names[INDEX_L3].name,
1495 sizes[INDEX_L3].cs_size,
1496 ARCH_KMALLOC_MINALIGN,
1497 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001498 NULL);
Andrew Mortona737b3e2006-03-22 00:08:11 -08001499 }
Christoph Lametere498be72005-09-09 13:03:32 -07001500
Ingo Molnare0a42722006-06-23 02:03:46 -07001501 slab_early_init = 0;
1502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 while (sizes->cs_size != ULONG_MAX) {
Christoph Lametere498be72005-09-09 13:03:32 -07001504 /*
1505 * For performance, all the general caches are L1 aligned.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 * This should be particularly beneficial on SMP boxes, as it
1507 * eliminates "false sharing".
 1508 * Note: for systems short on memory, removing the alignment will
Christoph Lametere498be72005-09-09 13:03:32 -07001509 * allow tighter packing of the smaller caches.
1510 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001511 if (!sizes->cs_cachep) {
Christoph Lametere498be72005-09-09 13:03:32 -07001512 sizes->cs_cachep = kmem_cache_create(names->name,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001513 sizes->cs_size,
1514 ARCH_KMALLOC_MINALIGN,
1515 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001516 NULL);
Andrew Mortona737b3e2006-03-22 00:08:11 -08001517 }
Christoph Lameter4b51d662007-02-10 01:43:10 -08001518#ifdef CONFIG_ZONE_DMA
1519 sizes->cs_dmacachep = kmem_cache_create(
1520 names->name_dma,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001521 sizes->cs_size,
1522 ARCH_KMALLOC_MINALIGN,
1523 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1524 SLAB_PANIC,
Paul Mundt20c2df82007-07-20 10:11:58 +09001525 NULL);
Christoph Lameter4b51d662007-02-10 01:43:10 -08001526#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 sizes++;
1528 names++;
1529 }
1530 /* 4) Replace the bootstrap head arrays */
1531 {
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001532 struct array_cache *ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
Christoph Lametere498be72005-09-09 13:03:32 -07001535
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 local_irq_disable();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001537 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1538 memcpy(ptr, cpu_cache_get(&cache_cache),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001539 sizeof(struct arraycache_init));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001540 /*
1541 * Do not assume that spinlocks can be initialized via memcpy:
1542 */
1543 spin_lock_init(&ptr->lock);
1544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 cache_cache.array[smp_processor_id()] = ptr;
1546 local_irq_enable();
Christoph Lametere498be72005-09-09 13:03:32 -07001547
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
Christoph Lametere498be72005-09-09 13:03:32 -07001549
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 local_irq_disable();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001551 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001552 != &initarray_generic.cache);
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001553 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001554 sizeof(struct arraycache_init));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001555 /*
1556 * Do not assume that spinlocks can be initialized via memcpy:
1557 */
1558 spin_lock_init(&ptr->lock);
1559
Christoph Lametere498be72005-09-09 13:03:32 -07001560 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001561 ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 local_irq_enable();
1563 }
Christoph Lametere498be72005-09-09 13:03:32 -07001564 /* 5) Replace the bootstrap kmem_list3's */
1565 {
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001566 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001568 /* Replace the static kmem_list3 structures for the boot cpu */
1569 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
1570
Christoph Lameter04231b32007-10-16 01:25:32 -07001571 for_each_node_state(nid, N_NORMAL_MEMORY) {
Christoph Lametere498be72005-09-09 13:03:32 -07001572 init_list(malloc_sizes[INDEX_AC].cs_cachep,
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001573 &initkmem_list3[SIZE_AC + nid], nid);
Christoph Lametere498be72005-09-09 13:03:32 -07001574
1575 if (INDEX_AC != INDEX_L3) {
1576 init_list(malloc_sizes[INDEX_L3].cs_cachep,
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001577 &initkmem_list3[SIZE_L3 + nid], nid);
Christoph Lametere498be72005-09-09 13:03:32 -07001578 }
1579 }
1580 }
1581
1582 /* 6) resize the head arrays to their final sizes */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 {
Pekka Enberg343e0d72006-02-01 03:05:50 -08001584 struct kmem_cache *cachep;
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001585 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 list_for_each_entry(cachep, &cache_chain, next)
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07001587 if (enable_cpucache(cachep))
1588 BUG();
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001589 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 }
1591
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001592 /* Annotate slab for lockdep -- annotate the malloc caches */
1593 init_lock_keys();
1594
1595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 /* Done! */
1597 g_cpucache_up = FULL;
1598
Andrew Mortona737b3e2006-03-22 00:08:11 -08001599 /*
1600 * Register a cpu startup notifier callback that initializes
1601 * cpu_cache_get for all new cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 */
1603 register_cpu_notifier(&cpucache_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Andrew Mortona737b3e2006-03-22 00:08:11 -08001605 /*
 1606 * The reap timers are started later, with a module init call; that part
1607 * of the kernel is not yet operational.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 */
1609}
1610
1611static int __init cpucache_init(void)
1612{
1613 int cpu;
1614
Andrew Mortona737b3e2006-03-22 00:08:11 -08001615 /*
1616 * Register the timers that return unneeded pages to the page allocator
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 */
Christoph Lametere498be72005-09-09 13:03:32 -07001618 for_each_online_cpu(cpu)
Andrew Mortona737b3e2006-03-22 00:08:11 -08001619 start_cpu_timer(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 return 0;
1621}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622__initcall(cpucache_init);
1623
1624/*
1625 * Interface to system's page allocator. No need to hold the cache-lock.
1626 *
 1627 * If we requested DMA-able memory, we will get it. Even if we
 1628 * did not request DMA-able memory, we might get it, but that
1629 * would be relatively rare and ignorable.
1630 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001631static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632{
1633 struct page *page;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001634 int nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 int i;
1636
Luke Yangd6fef9d2006-04-10 22:52:56 -07001637#ifndef CONFIG_MMU
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001638 /*
 1639 * Nommu uses slabs for process anonymous memory allocations, and thus
 1640 * requires __GFP_COMP to properly refcount higher order allocations.
Luke Yangd6fef9d2006-04-10 22:52:56 -07001641 */
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001642 flags |= __GFP_COMP;
Luke Yangd6fef9d2006-04-10 22:52:56 -07001643#endif
Christoph Lameter765c4502006-09-27 01:50:08 -07001644
Christoph Lameter3c517a62006-12-06 20:33:29 -08001645 flags |= cachep->gfpflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001646 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1647 flags |= __GFP_RECLAIMABLE;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001648
1649 page = alloc_pages_node(nodeid, flags, cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 if (!page)
1651 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001653 nr_pages = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
Christoph Lameter972d1a72006-09-25 23:31:51 -07001655 add_zone_page_state(page_zone(page),
1656 NR_SLAB_RECLAIMABLE, nr_pages);
1657 else
1658 add_zone_page_state(page_zone(page),
1659 NR_SLAB_UNRECLAIMABLE, nr_pages);
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001660 for (i = 0; i < nr_pages; i++)
1661 __SetPageSlab(page + i);
1662 return page_address(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663}
1664
1665/*
1666 * Interface to system's page release.
1667 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001668static void kmem_freepages(struct kmem_cache *cachep, void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001670 unsigned long i = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 struct page *page = virt_to_page(addr);
1672 const unsigned long nr_freed = i;
1673
Christoph Lameter972d1a72006-09-25 23:31:51 -07001674 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1675 sub_zone_page_state(page_zone(page),
1676 NR_SLAB_RECLAIMABLE, nr_freed);
1677 else
1678 sub_zone_page_state(page_zone(page),
1679 NR_SLAB_UNRECLAIMABLE, nr_freed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 while (i--) {
Nick Pigginf205b2f2006-03-22 00:08:02 -08001681 BUG_ON(!PageSlab(page));
1682 __ClearPageSlab(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 page++;
1684 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 if (current->reclaim_state)
1686 current->reclaim_state->reclaimed_slab += nr_freed;
1687 free_pages((unsigned long)addr, cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688}
1689
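/*
 * RCU callback used by slab_destroy() for SLAB_DESTROY_BY_RCU caches: the
 * slab's pages are handed back to the page allocator only after a grace
 * period, and an off-slab management structure is freed along with them.
 */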
1690static void kmem_rcu_free(struct rcu_head *head)
1691{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001692 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
Pekka Enberg343e0d72006-02-01 03:05:50 -08001693 struct kmem_cache *cachep = slab_rcu->cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
1695 kmem_freepages(cachep, slab_rcu->addr);
1696 if (OFF_SLAB(cachep))
1697 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1698}
1699
1700#if DEBUG
1701
1702#ifdef CONFIG_DEBUG_PAGEALLOC
Pekka Enberg343e0d72006-02-01 03:05:50 -08001703static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001704 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001706 int size = obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001708 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001710 if (size < 5 * sizeof(unsigned long))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 return;
1712
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001713 *addr++ = 0x12345678;
1714 *addr++ = caller;
1715 *addr++ = smp_processor_id();
1716 size -= 3 * sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 {
1718 unsigned long *sptr = &caller;
1719 unsigned long svalue;
1720
1721 while (!kstack_end(sptr)) {
1722 svalue = *sptr++;
1723 if (kernel_text_address(svalue)) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001724 *addr++ = svalue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 size -= sizeof(unsigned long);
1726 if (size <= sizeof(unsigned long))
1727 break;
1728 }
1729 }
1730
1731 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001732 *addr++ = 0x87654321;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733}
1734#endif
1735
Pekka Enberg343e0d72006-02-01 03:05:50 -08001736static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001738 int size = obj_size(cachep);
1739 addr = &((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
1741 memset(addr, val, size);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001742 *(unsigned char *)(addr + size - 1) = POISON_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743}
1744
1745static void dump_line(char *data, int offset, int limit)
1746{
1747 int i;
Dave Jonesaa83aa42006-09-29 01:59:51 -07001748 unsigned char error = 0;
1749 int bad_count = 0;
1750
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 printk(KERN_ERR "%03x:", offset);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001752 for (i = 0; i < limit; i++) {
1753 if (data[offset + i] != POISON_FREE) {
1754 error = data[offset + i];
1755 bad_count++;
1756 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001757 printk(" %02x", (unsigned char)data[offset + i]);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001758 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 printk("\n");
Dave Jonesaa83aa42006-09-29 01:59:51 -07001760
1761 if (bad_count == 1) {
1762 error ^= POISON_FREE;
1763 if (!(error & (error - 1))) {
1764 printk(KERN_ERR "Single bit error detected. Probably "
1765 "bad RAM.\n");
1766#ifdef CONFIG_X86
1767 printk(KERN_ERR "Run memtest86+ or a similar memory "
1768 "test tool.\n");
1769#else
1770 printk(KERN_ERR "Run a memory test tool.\n");
1771#endif
1772 }
1773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774}
1775#endif
1776
1777#if DEBUG
1778
Pekka Enberg343e0d72006-02-01 03:05:50 -08001779static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780{
1781 int i, size;
1782 char *realobj;
1783
1784 if (cachep->flags & SLAB_RED_ZONE) {
David Woodhouseb46b8f12007-05-08 00:22:59 -07001785 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001786 *dbg_redzone1(cachep, objp),
1787 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 }
1789
1790 if (cachep->flags & SLAB_STORE_USER) {
1791 printk(KERN_ERR "Last user: [<%p>]",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001792 *dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 print_symbol("(%s)",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001794 (unsigned long)*dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 printk("\n");
1796 }
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001797 realobj = (char *)objp + obj_offset(cachep);
1798 size = obj_size(cachep);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001799 for (i = 0; i < size && lines; i += 16, lines--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 int limit;
1801 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001802 if (i + limit > size)
1803 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 dump_line(realobj, i, limit);
1805 }
1806}
1807
Pekka Enberg343e0d72006-02-01 03:05:50 -08001808static void check_poison_obj(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809{
1810 char *realobj;
1811 int size, i;
1812 int lines = 0;
1813
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001814 realobj = (char *)objp + obj_offset(cachep);
1815 size = obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001817 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 char exp = POISON_FREE;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001819 if (i == size - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 exp = POISON_END;
1821 if (realobj[i] != exp) {
1822 int limit;
1823 /* Mismatch ! */
1824 /* Print header */
1825 if (lines == 0) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001826 printk(KERN_ERR
David Howellse94a40c2007-04-02 23:46:28 +01001827 "Slab corruption: %s start=%p, len=%d\n",
1828 cachep->name, realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 print_objinfo(cachep, objp, 0);
1830 }
1831 /* Hexdump the affected line */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001832 i = (i / 16) * 16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001834 if (i + limit > size)
1835 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 dump_line(realobj, i, limit);
1837 i += 16;
1838 lines++;
1839 /* Limit to 5 lines */
1840 if (lines > 5)
1841 break;
1842 }
1843 }
1844 if (lines != 0) {
1845 /* Print some data about the neighboring objects, if they
1846 * exist:
1847 */
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08001848 struct slab *slabp = virt_to_slab(objp);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001849 unsigned int objnr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001851 objnr = obj_to_index(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 if (objnr) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001853 objp = index_to_obj(cachep, slabp, objnr - 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001854 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001856 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 print_objinfo(cachep, objp, 2);
1858 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001859 if (objnr + 1 < cachep->num) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001860 objp = index_to_obj(cachep, slabp, objnr + 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001861 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001863 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 print_objinfo(cachep, objp, 2);
1865 }
1866 }
1867}
1868#endif
1869
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870#if DEBUG
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001871/**
Randy Dunlap911851e2006-03-22 00:08:14 -08001872 * slab_destroy_objs - destroy a slab and its objects
1873 * @cachep: cache pointer being destroyed
1874 * @slabp: slab pointer being destroyed
1875 *
 1876 * Verify the debug state (poisoning and red zones) of each object in a
 1877 * slab that is being destroyed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001878 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001879static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001880{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 int i;
1882 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001883 void *objp = index_to_obj(cachep, slabp, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
1885 if (cachep->flags & SLAB_POISON) {
1886#ifdef CONFIG_DEBUG_PAGEALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -08001887 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1888 OFF_SLAB(cachep))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001889 kernel_map_pages(virt_to_page(objp),
Andrew Mortona737b3e2006-03-22 00:08:11 -08001890 cachep->buffer_size / PAGE_SIZE, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 else
1892 check_poison_obj(cachep, objp);
1893#else
1894 check_poison_obj(cachep, objp);
1895#endif
1896 }
1897 if (cachep->flags & SLAB_RED_ZONE) {
1898 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1899 slab_error(cachep, "start of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001900 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1902 slab_error(cachep, "end of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001903 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 }
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001906}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907#else
Pekka Enberg343e0d72006-02-01 03:05:50 -08001908static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001909{
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001910}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911#endif
1912
Randy Dunlap911851e2006-03-22 00:08:14 -08001913/**
1914 * slab_destroy - destroy and release all objects in a slab
1915 * @cachep: cache pointer being destroyed
1916 * @slabp: slab pointer being destroyed
1917 *
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001918 * Destroy all the objs in a slab, and release the mem back to the system.
Andrew Mortona737b3e2006-03-22 00:08:11 -08001919 * Before calling, the slab must have been unlinked from the cache. The
1920 * cache-lock is not held/needed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001921 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001922static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001923{
1924 void *addr = slabp->s_mem - slabp->colouroff;
1925
1926 slab_destroy_objs(cachep, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1928 struct slab_rcu *slab_rcu;
1929
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001930 slab_rcu = (struct slab_rcu *)slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 slab_rcu->cachep = cachep;
1932 slab_rcu->addr = addr;
1933 call_rcu(&slab_rcu->head, kmem_rcu_free);
1934 } else {
1935 kmem_freepages(cachep, addr);
Ingo Molnar873623d2006-07-13 14:44:38 +02001936 if (OFF_SLAB(cachep))
1937 kmem_cache_free(cachep->slabp_cache, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 }
1939}
1940
Andrew Mortona737b3e2006-03-22 00:08:11 -08001941/*
 1942 * For setting up all the kmem_list3s for a cache whose buffer_size is the
 1943 * same as the size of kmem_list3.
1944 */
Andrew Mortona3a02be2007-05-06 14:49:31 -07001945static void __init set_up_list3s(struct kmem_cache *cachep, int index)
Christoph Lametere498be72005-09-09 13:03:32 -07001946{
1947 int node;
1948
Christoph Lameter04231b32007-10-16 01:25:32 -07001949 for_each_node_state(node, N_NORMAL_MEMORY) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001950 cachep->nodelists[node] = &initkmem_list3[index + node];
Christoph Lametere498be72005-09-09 13:03:32 -07001951 cachep->nodelists[node]->next_reap = jiffies +
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001952 REAPTIMEOUT_LIST3 +
1953 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
Christoph Lametere498be72005-09-09 13:03:32 -07001954 }
1955}
1956
Christoph Lameter117f6eb2006-09-25 23:31:37 -07001957static void __kmem_cache_destroy(struct kmem_cache *cachep)
1958{
1959 int i;
1960 struct kmem_list3 *l3;
1961
1962 for_each_online_cpu(i)
1963 kfree(cachep->array[i]);
1964
1965 /* NUMA: free the list3 structures */
1966 for_each_online_node(i) {
1967 l3 = cachep->nodelists[i];
1968 if (l3) {
1969 kfree(l3->shared);
1970 free_alien_cache(l3->alien);
1971 kfree(l3);
1972 }
1973 }
1974 kmem_cache_free(&cache_cache, cachep);
1975}
1976
1977
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978/**
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001979 * calculate_slab_order - calculate size (page order) of slabs
1980 * @cachep: pointer to the cache that is being created
1981 * @size: size of objects to be created in this cache.
1982 * @align: required alignment for the objects.
1983 * @flags: slab allocation flags
1984 *
1985 * Also calculates the number of objects per slab.
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001986 *
1987 * This could be made much more intelligent. For now, try to avoid using
1988 * high order pages for slabs. When the gfp() functions are more friendly
1989 * towards high-order requests, this should be changed.
1990 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001991static size_t calculate_slab_order(struct kmem_cache *cachep,
Randy Dunlapee13d782006-02-01 03:05:53 -08001992 size_t size, size_t align, unsigned long flags)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001993{
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001994 unsigned long offslab_limit;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001995 size_t left_over = 0;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001996 int gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001997
Christoph Lameter0aa817f2007-05-16 22:11:01 -07001998 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001999 unsigned int num;
2000 size_t remainder;
2001
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002002 cache_estimate(gfporder, size, align, flags, &remainder, &num);
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002003 if (!num)
2004 continue;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002005
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02002006 if (flags & CFLGS_OFF_SLAB) {
2007 /*
2008 * Max number of objs-per-slab for caches which
2009 * use off-slab slabs. Needed to avoid a possible
2010 * looping condition in cache_grow().
2011 */
2012 offslab_limit = size - sizeof(struct slab);
2013 offslab_limit /= sizeof(kmem_bufctl_t);
2014
2015 if (num > offslab_limit)
2016 break;
2017 }
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002018
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002019 /* Found something acceptable - save it away */
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002020 cachep->num = num;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002021 cachep->gfporder = gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002022 left_over = remainder;
2023
2024 /*
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08002025 * A VFS-reclaimable slab tends to have most allocations
2026 * as GFP_NOFS and we really don't want to have to be allocating
2027 * higher-order pages when we are unable to shrink dcache.
2028 */
2029 if (flags & SLAB_RECLAIM_ACCOUNT)
2030 break;
2031
2032 /*
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002033 * Large number of objects is good, but very large slabs are
2034 * currently bad for the gfp()s.
2035 */
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002036 if (gfporder >= slab_break_gfp_order)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002037 break;
2038
Linus Torvalds9888e6f2006-03-06 17:44:43 -08002039 /*
2040 * Acceptable internal fragmentation?
2041 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002042 if (left_over * 8 <= (PAGE_SIZE << gfporder))
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002043 break;
2044 }
2045 return left_over;
2046}
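/*
 * Worked example of the fragmentation check above (illustrative numbers):
 * an order is accepted once left_over * 8 <= (PAGE_SIZE << gfporder), i.e.
 * once no more than 1/8th of the slab is wasted.  With 4 KB pages an
 * order-0 slab therefore tolerates at most 512 bytes of leftover space.
 */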
2047
Sam Ravnborg38bdc322007-05-17 23:48:19 +02002048static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002049{
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002050 if (g_cpucache_up == FULL)
2051 return enable_cpucache(cachep);
2052
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002053 if (g_cpucache_up == NONE) {
2054 /*
2055 * Note: the first kmem_cache_create must create the cache
2056 * that's used by kmalloc(24), otherwise the creation of
2057 * further caches will BUG().
2058 */
2059 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2060
2061 /*
2062 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2063 * the first cache, then we need to set up all its list3s,
2064 * otherwise the creation of further caches will BUG().
2065 */
2066 set_up_list3s(cachep, SIZE_AC);
2067 if (INDEX_AC == INDEX_L3)
2068 g_cpucache_up = PARTIAL_L3;
2069 else
2070 g_cpucache_up = PARTIAL_AC;
2071 } else {
2072 cachep->array[smp_processor_id()] =
2073 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2074
2075 if (g_cpucache_up == PARTIAL_AC) {
2076 set_up_list3s(cachep, SIZE_L3);
2077 g_cpucache_up = PARTIAL_L3;
2078 } else {
2079 int node;
Christoph Lameter04231b32007-10-16 01:25:32 -07002080 for_each_node_state(node, N_NORMAL_MEMORY) {
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002081 cachep->nodelists[node] =
2082 kmalloc_node(sizeof(struct kmem_list3),
2083 GFP_KERNEL, node);
2084 BUG_ON(!cachep->nodelists[node]);
2085 kmem_list3_init(cachep->nodelists[node]);
2086 }
2087 }
2088 }
2089 cachep->nodelists[numa_node_id()]->next_reap =
2090 jiffies + REAPTIMEOUT_LIST3 +
2091 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2092
2093 cpu_cache_get(cachep)->avail = 0;
2094 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2095 cpu_cache_get(cachep)->batchcount = 1;
2096 cpu_cache_get(cachep)->touched = 0;
2097 cachep->batchcount = 1;
2098 cachep->limit = BOOT_CPUCACHE_ENTRIES;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002099 return 0;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002100}
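/*
 * Note: g_cpucache_up progresses NONE -> PARTIAL_AC -> PARTIAL_L3 -> FULL
 * during bootstrap; setup_cpu_cache() above picks static __init structures
 * or kmalloc'ed ones depending on how far that progression has got.
 */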
2101
Pekka Enberg4d268eb2006-01-08 01:00:36 -08002102/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 * kmem_cache_create - Create a cache.
2104 * @name: A string which is used in /proc/slabinfo to identify this cache.
2105 * @size: The size of objects to be created in this cache.
2106 * @align: The required alignment for the objects.
2107 * @flags: SLAB flags
2108 * @ctor: A constructor for the objects.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 *
2110 * Returns a ptr to the cache on success, NULL on failure.
 2111 * Cannot be called within an interrupt, but can be interrupted.
Paul Mundt20c2df82007-07-20 10:11:58 +09002112 * The @ctor is run when new pages are allocated by the cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 *
2114 * @name must be valid until the cache is destroyed. This implies that
Andrew Mortona737b3e2006-03-22 00:08:11 -08002115 * the module calling this has to destroy the cache before getting unloaded.
2116 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 * The flags are
2118 *
2119 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2120 * to catch references to uninitialised memory.
2121 *
2122 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2123 * for buffer overruns.
2124 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2126 * cacheline. This can be beneficial if you're counting cycles as closely
2127 * as davem.
2128 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002129struct kmem_cache *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130kmem_cache_create (const char *name, size_t size, size_t align,
Andrew Mortona737b3e2006-03-22 00:08:11 -08002131 unsigned long flags,
Christoph Lameter4ba9b9d2007-10-16 23:25:51 -07002132 void (*ctor)(struct kmem_cache *, void *))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133{
2134 size_t left_over, slab_size, ralign;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07002135 struct kmem_cache *cachep = NULL, *pc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
2137 /*
2138 * Sanity checks... these are all serious usage bugs.
2139 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002140 if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
Paul Mundt20c2df82007-07-20 10:11:58 +09002141 size > KMALLOC_MAX_SIZE) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002142 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
2143 name);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002144 BUG();
2145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
Ravikiran G Thirumalaif0188f42006-02-10 01:51:13 -08002147 /*
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002148 * We use cache_chain_mutex to ensure a consistent view of
2149 * cpu_online_map as well. Please see cpuup_callback
Ravikiran G Thirumalaif0188f42006-02-10 01:51:13 -08002150 */
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002151 mutex_lock(&cache_chain_mutex);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002152
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07002153 list_for_each_entry(pc, &cache_chain, next) {
Andrew Morton4f12bb42005-11-07 00:58:00 -08002154 char tmp;
2155 int res;
2156
2157 /*
2158 * This happens when the module gets unloaded and doesn't
2159 * destroy its slab cache and no-one else reuses the vmalloc
2160 * area of the module. Print a warning.
2161 */
Andrew Morton138ae662006-12-06 20:36:41 -08002162 res = probe_kernel_address(pc->name, tmp);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002163 if (res) {
matzeb4169522007-05-06 14:49:52 -07002164 printk(KERN_ERR
2165 "SLAB: cache with size %d has lost its name\n",
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002166 pc->buffer_size);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002167 continue;
2168 }
2169
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002170 if (!strcmp(pc->name, name)) {
matzeb4169522007-05-06 14:49:52 -07002171 printk(KERN_ERR
2172 "kmem_cache_create: duplicate cache %s\n", name);
Andrew Morton4f12bb42005-11-07 00:58:00 -08002173 dump_stack();
2174 goto oops;
2175 }
2176 }
2177
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178#if DEBUG
2179 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180#if FORCED_DEBUG
2181 /*
2182 * Enable redzoning and last user accounting, except for caches with
2183 * large objects, if the increased size would increase the object size
2184 * above the next power of two: caches with object sizes just above a
2185 * power of two have a significant amount of internal fragmentation.
2186 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002187 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2188 2 * sizeof(unsigned long long)))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002189 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 if (!(flags & SLAB_DESTROY_BY_RCU))
2191 flags |= SLAB_POISON;
2192#endif
2193 if (flags & SLAB_DESTROY_BY_RCU)
2194 BUG_ON(flags & SLAB_POISON);
2195#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002197 * Always check flags; a caller might be expecting debug support which
2198 * isn't available.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 */
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002200 BUG_ON(flags & ~CREATE_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
Andrew Mortona737b3e2006-03-22 00:08:11 -08002202 /*
2203 * Check that size is in terms of words. This is needed to avoid
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 * unaligned accesses for some archs when redzoning is used, and makes
 2205 * sure any on-slab bufctls are also correctly aligned.
2206 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002207 if (size & (BYTES_PER_WORD - 1)) {
2208 size += (BYTES_PER_WORD - 1);
2209 size &= ~(BYTES_PER_WORD - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 }
2211
Andrew Mortona737b3e2006-03-22 00:08:11 -08002212 /* calculate the final buffer alignment: */
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 /* 1) arch recommendation: can be overridden for debug */
2215 if (flags & SLAB_HWCACHE_ALIGN) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002216 /*
2217 * Default alignment: as specified by the arch code. Except if
2218 * an object is really small, then squeeze multiple objects into
2219 * one cacheline.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 */
2221 ralign = cache_line_size();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002222 while (size <= ralign / 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 ralign /= 2;
2224 } else {
2225 ralign = BYTES_PER_WORD;
2226 }
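	/*
	 * Illustration with a hypothetical size: on 64-byte cache lines, a
	 * 20-byte object leaves the loop above with ralign == 32, so two
	 * objects share one cache line instead of each padding out to 64.
	 */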
Pekka Enbergca5f9702006-09-25 23:31:25 -07002227
2228 /*
David Woodhouse87a927c2007-07-04 21:26:44 -04002229 * Redzoning and user store require word alignment or possibly larger.
2230 * Note this will be overridden by architecture or caller mandated
2231 * alignment if either is greater than BYTES_PER_WORD.
Pekka Enbergca5f9702006-09-25 23:31:25 -07002232 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002233 if (flags & SLAB_STORE_USER)
2234 ralign = BYTES_PER_WORD;
2235
2236 if (flags & SLAB_RED_ZONE) {
2237 ralign = REDZONE_ALIGN;
2238 /* If redzoning, ensure that the second redzone is suitably
2239 * aligned, by adjusting the object size accordingly. */
2240 size += REDZONE_ALIGN - 1;
2241 size &= ~(REDZONE_ALIGN - 1);
2242 }
Pekka Enbergca5f9702006-09-25 23:31:25 -07002243
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002244 /* 2) arch mandated alignment */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 if (ralign < ARCH_SLAB_MINALIGN) {
2246 ralign = ARCH_SLAB_MINALIGN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 }
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002248 /* 3) caller mandated alignment */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 if (ralign < align) {
2250 ralign = align;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 }
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002252 /* disable debug if necessary */
David Woodhouseb46b8f12007-05-08 00:22:59 -07002253 if (ralign > __alignof__(unsigned long long))
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002254 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002255 /*
Pekka Enbergca5f9702006-09-25 23:31:25 -07002256 * 4) Store it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 */
2258 align = ralign;
2259
2260 /* Get cache's description obj. */
Christoph Lametere94b1762006-12-06 20:33:17 -08002261 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 if (!cachep)
Andrew Morton4f12bb42005-11-07 00:58:00 -08002263 goto oops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
2265#if DEBUG
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002266 cachep->obj_size = size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
Pekka Enbergca5f9702006-09-25 23:31:25 -07002268 /*
2269 * Both debugging options require word-alignment which is calculated
2270 * into align above.
2271 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 if (flags & SLAB_RED_ZONE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 /* add space for red zone words */
David Woodhouseb46b8f12007-05-08 00:22:59 -07002274 cachep->obj_offset += sizeof(unsigned long long);
2275 size += 2 * sizeof(unsigned long long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 }
2277 if (flags & SLAB_STORE_USER) {
Pekka Enbergca5f9702006-09-25 23:31:25 -07002278 /* user store requires one word of storage behind the end of
David Woodhouse87a927c2007-07-04 21:26:44 -04002279 * the real object. But if the second red zone needs to be
2280 * aligned to 64 bits, we must allow that much space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002282 if (flags & SLAB_RED_ZONE)
2283 size += REDZONE_ALIGN;
2284 else
2285 size += BYTES_PER_WORD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
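	/*
	 * With both debug options enabled the per-object layout is roughly
	 *   [ red zone 1 ][ caller-visible object ][ red zone 2 ][ last-user word ]
	 * which is what the obj_offset and size adjustments above provide for.
	 */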
2287#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002288 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002289 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2290 cachep->obj_offset += PAGE_SIZE - size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 size = PAGE_SIZE;
2292 }
2293#endif
2294#endif
2295
Ingo Molnare0a42722006-06-23 02:03:46 -07002296 /*
2297 * Determine if the slab management is 'on' or 'off' slab.
2298 * (bootstrapping cannot cope with offslab caches so don't do
2299 * it too early on.)
2300 */
2301 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 /*
 2303 * The size is large, so assume it is best to place the slab management obj
 2304 * off-slab (this should allow better packing of objs).
2305 */
2306 flags |= CFLGS_OFF_SLAB;
2307
2308 size = ALIGN(size, align);
2309
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08002310 left_over = calculate_slab_order(cachep, size, align, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311
2312 if (!cachep->num) {
matzeb4169522007-05-06 14:49:52 -07002313 printk(KERN_ERR
2314 "kmem_cache_create: couldn't create cache %s.\n", name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 kmem_cache_free(&cache_cache, cachep);
2316 cachep = NULL;
Andrew Morton4f12bb42005-11-07 00:58:00 -08002317 goto oops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002319 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2320 + sizeof(struct slab), align);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 /*
2323 * If the slab has been placed off-slab, and we have enough space then
2324 * move it on-slab. This is at the expense of any extra colouring.
2325 */
2326 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2327 flags &= ~CFLGS_OFF_SLAB;
2328 left_over -= slab_size;
2329 }
2330
2331 if (flags & CFLGS_OFF_SLAB) {
2332 /* really off slab. No need for manual alignment */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002333 slab_size =
2334 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 }
2336
2337 cachep->colour_off = cache_line_size();
2338 /* Offset must be a multiple of the alignment. */
2339 if (cachep->colour_off < align)
2340 cachep->colour_off = align;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002341 cachep->colour = left_over / cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 cachep->slab_size = slab_size;
2343 cachep->flags = flags;
2344 cachep->gfpflags = 0;
Christoph Lameter4b51d662007-02-10 01:43:10 -08002345 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 cachep->gfpflags |= GFP_DMA;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002347 cachep->buffer_size = size;
Eric Dumazet6a2d7a92006-12-13 00:34:27 -08002348 cachep->reciprocal_buffer_size = reciprocal_value(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002350 if (flags & CFLGS_OFF_SLAB) {
Victor Fuscob2d55072005-09-10 00:26:36 -07002351 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002352 /*
2353 * This is a possibility for one of the malloc_sizes caches.
2354 * But since we go off slab only for object size greater than
2355 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2356 * this should not happen at all.
2357 * But leave a BUG_ON for some lucky dude.
2358 */
Christoph Lameter6cb8f912007-07-17 04:03:22 -07002359 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002360 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 cachep->ctor = ctor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 cachep->name = name;
2363
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002364 if (setup_cpu_cache(cachep)) {
2365 __kmem_cache_destroy(cachep);
2366 cachep = NULL;
2367 goto oops;
2368 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 /* cache setup completed, link it into the list */
2371 list_add(&cachep->next, &cache_chain);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002372oops:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 if (!cachep && (flags & SLAB_PANIC))
2374 panic("kmem_cache_create(): failed to create slab `%s'\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002375 name);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002376 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 return cachep;
2378}
2379EXPORT_SYMBOL(kmem_cache_create);
2380
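#if 0
/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * creates a cache for one object type at init time and then allocates and
 * frees objects from it.  The structure, names and flags below are
 * hypothetical.
 */
struct example_obj {
	struct list_head list;
	int value;
};

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	/* SLAB_PANIC means a failed create panics instead of returning NULL */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					   NULL);
	return 0;
}

static void example_use(void)
{
	struct example_obj *obj;

	obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
	if (!obj)
		return;
	obj->value = 42;
	kmem_cache_free(example_cachep, obj);
}
#endif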
2381#if DEBUG
2382static void check_irq_off(void)
2383{
2384 BUG_ON(!irqs_disabled());
2385}
2386
2387static void check_irq_on(void)
2388{
2389 BUG_ON(irqs_disabled());
2390}
2391
Pekka Enberg343e0d72006-02-01 03:05:50 -08002392static void check_spinlock_acquired(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393{
2394#ifdef CONFIG_SMP
2395 check_irq_off();
Christoph Lametere498be72005-09-09 13:03:32 -07002396 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397#endif
2398}
Christoph Lametere498be72005-09-09 13:03:32 -07002399
Pekka Enberg343e0d72006-02-01 03:05:50 -08002400static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
Christoph Lametere498be72005-09-09 13:03:32 -07002401{
2402#ifdef CONFIG_SMP
2403 check_irq_off();
2404 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2405#endif
2406}
2407
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408#else
2409#define check_irq_off() do { } while(0)
2410#define check_irq_on() do { } while(0)
2411#define check_spinlock_acquired(x) do { } while(0)
Christoph Lametere498be72005-09-09 13:03:32 -07002412#define check_spinlock_acquired_node(x, y) do { } while(0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413#endif
2414
Christoph Lameteraab22072006-03-22 00:09:06 -08002415static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2416 struct array_cache *ac,
2417 int force, int node);
2418
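/*
 * Flush the calling CPU's array_cache for this cache back into the node
 * lists. Runs on each CPU via on_each_cpu() with interrupts disabled.
 */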
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419static void do_drain(void *arg)
2420{
Andrew Mortona737b3e2006-03-22 00:08:11 -08002421 struct kmem_cache *cachep = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 struct array_cache *ac;
Christoph Lameterff694162005-09-22 21:44:02 -07002423 int node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
2425 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002426 ac = cpu_cache_get(cachep);
Christoph Lameterff694162005-09-22 21:44:02 -07002427 spin_lock(&cachep->nodelists[node]->list_lock);
2428 free_block(cachep, ac->entry, ac->avail, node);
2429 spin_unlock(&cachep->nodelists[node]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 ac->avail = 0;
2431}
2432
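/*
 * Empty all per-cpu caches (and any alien caches) for this cache, then
 * drain each node's shared array back into the slab lists.
 */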
Pekka Enberg343e0d72006-02-01 03:05:50 -08002433static void drain_cpu_caches(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434{
Christoph Lametere498be72005-09-09 13:03:32 -07002435 struct kmem_list3 *l3;
2436 int node;
2437
Andrew Mortona07fa392006-03-22 00:08:17 -08002438 on_each_cpu(do_drain, cachep, 1, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 check_irq_on();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002440 for_each_online_node(node) {
Christoph Lametere498be72005-09-09 13:03:32 -07002441 l3 = cachep->nodelists[node];
Roland Dreiera4523a82006-05-15 11:41:00 -07002442 if (l3 && l3->alien)
2443 drain_alien_cache(cachep, l3->alien);
2444 }
2445
2446 for_each_online_node(node) {
2447 l3 = cachep->nodelists[node];
2448 if (l3)
Christoph Lameteraab22072006-03-22 00:09:06 -08002449 drain_array(cachep, l3, l3->shared, 1, node);
Christoph Lametere498be72005-09-09 13:03:32 -07002450 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451}
2452
Christoph Lametered11d9e2006-06-30 01:55:45 -07002453/*
2454 * Remove slabs from the list of free slabs.
2455 * Specify the number of slabs to drain in tofree.
2456 *
2457 * Returns the actual number of slabs released.
2458 */
2459static int drain_freelist(struct kmem_cache *cache,
2460 struct kmem_list3 *l3, int tofree)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461{
Christoph Lametered11d9e2006-06-30 01:55:45 -07002462 struct list_head *p;
2463 int nr_freed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 struct slab *slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
Christoph Lametered11d9e2006-06-30 01:55:45 -07002466 nr_freed = 0;
2467 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
Christoph Lametered11d9e2006-06-30 01:55:45 -07002469 spin_lock_irq(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07002470 p = l3->slabs_free.prev;
Christoph Lametered11d9e2006-06-30 01:55:45 -07002471 if (p == &l3->slabs_free) {
2472 spin_unlock_irq(&l3->list_lock);
2473 goto out;
2474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
Christoph Lametered11d9e2006-06-30 01:55:45 -07002476 slabp = list_entry(p, struct slab, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477#if DEBUG
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002478 BUG_ON(slabp->inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479#endif
2480 list_del(&slabp->list);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002481 /*
2482 * Safe to drop the lock. The slab is no longer linked
2483 * to the cache.
2484 */
2485 l3->free_objects -= cache->num;
Christoph Lametere498be72005-09-09 13:03:32 -07002486 spin_unlock_irq(&l3->list_lock);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002487 slab_destroy(cache, slabp);
2488 nr_freed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 }
Christoph Lametered11d9e2006-06-30 01:55:45 -07002490out:
2491 return nr_freed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492}
2493
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002494/* Called with cache_chain_mutex held to protect against cpu hotplug */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002495static int __cache_shrink(struct kmem_cache *cachep)
Christoph Lametere498be72005-09-09 13:03:32 -07002496{
2497 int ret = 0, i = 0;
2498 struct kmem_list3 *l3;
2499
2500 drain_cpu_caches(cachep);
2501
2502 check_irq_on();
2503 for_each_online_node(i) {
2504 l3 = cachep->nodelists[i];
Christoph Lametered11d9e2006-06-30 01:55:45 -07002505 if (!l3)
2506 continue;
2507
2508 drain_freelist(cachep, l3, l3->free_objects);
2509
2510 ret += !list_empty(&l3->slabs_full) ||
2511 !list_empty(&l3->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07002512 }
2513 return (ret ? 1 : 0);
2514}
2515
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516/**
2517 * kmem_cache_shrink - Shrink a cache.
2518 * @cachep: The cache to shrink.
2519 *
2520 * Releases as many slabs as possible for a cache.
2521 * To help debugging, a zero exit status indicates all slabs were released.
2522 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002523int kmem_cache_shrink(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524{
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002525 int ret;
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002526 BUG_ON(!cachep || in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002528 mutex_lock(&cache_chain_mutex);
2529 ret = __cache_shrink(cachep);
2530 mutex_unlock(&cache_chain_mutex);
2531 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532}
2533EXPORT_SYMBOL(kmem_cache_shrink);
2534
2535/**
2536 * kmem_cache_destroy - delete a cache
2537 * @cachep: the cache to destroy
2538 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08002539 * Remove a &struct kmem_cache object from the slab cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 *
2541 * It is expected this function will be called by a module when it is
2542 * unloaded. This will remove the cache completely, and avoid a duplicate
2543 * cache being allocated each time a module is loaded and unloaded, if the
2544 * module doesn't have persistent in-kernel storage across loads and unloads.
2545 *
2546 * The cache must be empty before calling this function.
2547 *
2548 * The caller must guarantee that no one will allocate memory from the cache
2549 * during the kmem_cache_destroy().
2550 */
Alexey Dobriyan133d2052006-09-27 01:49:41 -07002551void kmem_cache_destroy(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552{
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002553 BUG_ON(!cachep || in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 /* Find the cache in the chain of caches. */
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002556 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 /*
2558 * the chain is never empty, cache_cache is never destroyed
2559 */
2560 list_del(&cachep->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 if (__cache_shrink(cachep)) {
2562 slab_error(cachep, "Can't free all objects");
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002563 list_add(&cachep->next, &cache_chain);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002564 mutex_unlock(&cache_chain_mutex);
Alexey Dobriyan133d2052006-09-27 01:49:41 -07002565 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 }
2567
2568 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07002569 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570
Christoph Lameter117f6eb2006-09-25 23:31:37 -07002571 __kmem_cache_destroy(cachep);
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08002572 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573}
2574EXPORT_SYMBOL(kmem_cache_destroy);
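/*
 * Illustrative sketch of the expected create/destroy pairing from a module;
 * "foo_cache" and "struct foo" are hypothetical names, not defined in this
 * file, and the kmem_cache_create() arguments assume the usual
 * (name, size, align, flags, ctor) prototype:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *					      0, SLAB_HWCACHE_ALIGN, NULL);
 *		return foo_cache ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 *
 * The cache must already be empty by the time foo_exit() runs.
 */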
2575
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002576/*
2577 * Get the memory for a slab management obj.
2578 * When a cache keeps its slab descriptors off-slab, those descriptors
2579 * always come from one of the malloc_sizes caches. The slab descriptor cannot
2580 * come from the same cache that is currently being created because,
2581 * when we search for an appropriate cache for these
2582 * descriptors in kmem_cache_create, we only search the malloc_sizes array.
2583 * A malloc_sizes cache that is being created here would not yet be visible to
2584 * kmem_find_general_cachep until its initialization is complete.
2585 * Hence slabp_cache can never be the same as the cache being created.
2586 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002587static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002588 int colour_off, gfp_t local_flags,
2589 int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590{
2591 struct slab *slabp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002592
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 if (OFF_SLAB(cachep)) {
2594 /* Slab management obj is off-slab. */
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002595 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
Christoph Lameter3c517a62006-12-06 20:33:29 -08002596 local_flags & ~GFP_THISNODE, nodeid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 if (!slabp)
2598 return NULL;
2599 } else {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002600 slabp = objp + colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 colour_off += cachep->slab_size;
2602 }
2603 slabp->inuse = 0;
2604 slabp->colouroff = colour_off;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002605 slabp->s_mem = objp + colour_off;
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002606 slabp->nodeid = nodeid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 return slabp;
2608}
2609
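/*
 * The bufctl free-list array lives directly after the struct slab in the
 * slab management object.
 */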
2610static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2611{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002612 return (kmem_bufctl_t *) (slabp + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613}
2614
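/*
 * Initialise every object in a freshly grown slab: apply debug poisoning
 * and red-zoning if enabled, run the constructor, and chain all objects
 * into the slab's bufctl free list.
 */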
Pekka Enberg343e0d72006-02-01 03:05:50 -08002615static void cache_init_objs(struct kmem_cache *cachep,
Christoph Lametera35afb82007-05-16 22:10:57 -07002616 struct slab *slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617{
2618 int i;
2619
2620 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002621 void *objp = index_to_obj(cachep, slabp, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622#if DEBUG
2623 /* need to poison the objs? */
2624 if (cachep->flags & SLAB_POISON)
2625 poison_obj(cachep, objp, POISON_FREE);
2626 if (cachep->flags & SLAB_STORE_USER)
2627 *dbg_userword(cachep, objp) = NULL;
2628
2629 if (cachep->flags & SLAB_RED_ZONE) {
2630 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2631 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2632 }
2633 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002634 * Constructors are not allowed to allocate memory from the same
2635 * cache which they are a constructor for. Otherwise, deadlock.
2636 * They must also be threaded.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 */
2638 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
Christoph Lameter4ba9b9d2007-10-16 23:25:51 -07002639 cachep->ctor(cachep, objp + obj_offset(cachep));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640
2641 if (cachep->flags & SLAB_RED_ZONE) {
2642 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2643 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002644 " end of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2646 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002647 " start of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 }
Andrew Mortona737b3e2006-03-22 00:08:11 -08002649 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2650 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002651 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002652 cachep->buffer_size / PAGE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653#else
2654 if (cachep->ctor)
Christoph Lameter4ba9b9d2007-10-16 23:25:51 -07002655 cachep->ctor(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656#endif
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002657 slab_bufctl(slabp)[i] = i + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002659 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 slabp->free = 0;
2661}
2662
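/* Sanity check: the gfp flags must match the DMA setting of the cache. */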
Pekka Enberg343e0d72006-02-01 03:05:50 -08002663static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664{
Christoph Lameter4b51d662007-02-10 01:43:10 -08002665 if (CONFIG_ZONE_DMA_FLAG) {
2666 if (flags & GFP_DMA)
2667 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2668 else
2669 BUG_ON(cachep->gfpflags & GFP_DMA);
2670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671}
2672
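/* Pop the next free object off the slab's bufctl free list. */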
Andrew Mortona737b3e2006-03-22 00:08:11 -08002673static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2674 int nodeid)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002675{
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002676 void *objp = index_to_obj(cachep, slabp, slabp->free);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002677 kmem_bufctl_t next;
2678
2679 slabp->inuse++;
2680 next = slab_bufctl(slabp)[slabp->free];
2681#if DEBUG
2682 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2683 WARN_ON(slabp->nodeid != nodeid);
2684#endif
2685 slabp->free = next;
2686
2687 return objp;
2688}
2689
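/* Push an object back onto the slab's bufctl free list. */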
Andrew Mortona737b3e2006-03-22 00:08:11 -08002690static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2691 void *objp, int nodeid)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002692{
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002693 unsigned int objnr = obj_to_index(cachep, slabp, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002694
2695#if DEBUG
2696 /* Verify that the slab belongs to the intended node */
2697 WARN_ON(slabp->nodeid != nodeid);
2698
Al Viro871751e2006-03-25 03:06:39 -08002699 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
Matthew Dobson78d382d2006-02-01 03:05:47 -08002700 printk(KERN_ERR "slab: double free detected in cache "
Andrew Mortona737b3e2006-03-22 00:08:11 -08002701 "'%s', objp %p\n", cachep->name, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002702 BUG();
2703 }
2704#endif
2705 slab_bufctl(slabp)[objnr] = slabp->free;
2706 slabp->free = objnr;
2707 slabp->inuse--;
2708}
2709
Pekka Enberg47768742006-06-23 02:03:07 -07002710/*
2711 * Map pages beginning at addr to the given cache and slab. This is required
2712 * for the slab allocator to be able to look up the cache and slab of a
2713 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2714 */
2715static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2716 void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717{
Pekka Enberg47768742006-06-23 02:03:07 -07002718 int nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719 struct page *page;
2720
Pekka Enberg47768742006-06-23 02:03:07 -07002721 page = virt_to_page(addr);
Nick Piggin84097512006-03-22 00:08:34 -08002722
Pekka Enberg47768742006-06-23 02:03:07 -07002723 nr_pages = 1;
Nick Piggin84097512006-03-22 00:08:34 -08002724 if (likely(!PageCompound(page)))
Pekka Enberg47768742006-06-23 02:03:07 -07002725 nr_pages <<= cache->gfporder;
2726
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 do {
Pekka Enberg47768742006-06-23 02:03:07 -07002728 page_set_cache(page, cache);
2729 page_set_slab(page, slab);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 page++;
Pekka Enberg47768742006-06-23 02:03:07 -07002731 } while (--nr_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732}
2733
2734/*
2735 * Grow (by 1) the number of slabs within a cache. This is called by
2736 * kmem_cache_alloc() when there are no active objs left in a cache.
2737 */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002738static int cache_grow(struct kmem_cache *cachep,
2739 gfp_t flags, int nodeid, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002741 struct slab *slabp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002742 size_t offset;
2743 gfp_t local_flags;
Christoph Lametere498be72005-09-09 13:03:32 -07002744 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745
Andrew Mortona737b3e2006-03-22 00:08:11 -08002746 /*
2747 * Be lazy and only check for valid flags here, keeping it out of the
2748 * critical path in kmem_cache_alloc().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 */
Christoph Lameter6cb06222007-10-16 01:25:41 -07002750 BUG_ON(flags & GFP_SLAB_BUG_MASK);
2751 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002753 /* Take the l3 list lock to change the colour_next on this node */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 check_irq_off();
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002755 l3 = cachep->nodelists[nodeid];
2756 spin_lock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
2758	/* Get the colour for the slab, and calculate the next value. */
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002759 offset = l3->colour_next;
2760 l3->colour_next++;
2761 if (l3->colour_next >= cachep->colour)
2762 l3->colour_next = 0;
2763 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002765 offset *= cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
2767 if (local_flags & __GFP_WAIT)
2768 local_irq_enable();
2769
2770 /*
2771 * The test for missing atomic flag is performed here, rather than
2772 * the more obvious place, simply to reduce the critical path length
2773	 * in kmem_cache_alloc(). If a caller is seriously misbehaving, they
2774 * will eventually be caught here (where it matters).
2775 */
2776 kmem_flagcheck(cachep, flags);
2777
Andrew Mortona737b3e2006-03-22 00:08:11 -08002778 /*
2779 * Get mem for the objs. Attempt to allocate a physical page from
2780 * 'nodeid'.
Christoph Lametere498be72005-09-09 13:03:32 -07002781 */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002782 if (!objp)
Andrew Mortonb8c1c5d2007-07-24 12:02:40 -07002783 objp = kmem_getpages(cachep, local_flags, nodeid);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002784 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 goto failed;
2786
2787 /* Get slab management. */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002788 slabp = alloc_slabmgmt(cachep, objp, offset,
Christoph Lameter6cb06222007-10-16 01:25:41 -07002789 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002790 if (!slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 goto opps1;
2792
Christoph Lametere498be72005-09-09 13:03:32 -07002793 slabp->nodeid = nodeid;
Pekka Enberg47768742006-06-23 02:03:07 -07002794 slab_map_pages(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795
Christoph Lametera35afb82007-05-16 22:10:57 -07002796 cache_init_objs(cachep, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
2798 if (local_flags & __GFP_WAIT)
2799 local_irq_disable();
2800 check_irq_off();
Christoph Lametere498be72005-09-09 13:03:32 -07002801 spin_lock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
2803 /* Make slab active. */
Christoph Lametere498be72005-09-09 13:03:32 -07002804 list_add_tail(&slabp->list, &(l3->slabs_free));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 STATS_INC_GROWN(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07002806 l3->free_objects += cachep->num;
2807 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002809opps1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 kmem_freepages(cachep, objp);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002811failed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 if (local_flags & __GFP_WAIT)
2813 local_irq_disable();
2814 return 0;
2815}
2816
2817#if DEBUG
2818
2819/*
2820 * Perform extra freeing checks:
2821 * - detect bad pointers
2822 * - check POISON/RED_ZONE markers
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 */
2824static void kfree_debugcheck(const void *objp)
2825{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 if (!virt_addr_valid(objp)) {
2827 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002828 (unsigned long)objp);
2829 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831}
2832
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002833static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2834{
David Woodhouseb46b8f12007-05-08 00:22:59 -07002835 unsigned long long redzone1, redzone2;
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002836
2837 redzone1 = *dbg_redzone1(cache, obj);
2838 redzone2 = *dbg_redzone2(cache, obj);
2839
2840 /*
2841 * Redzone is ok.
2842 */
2843 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2844 return;
2845
2846 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2847 slab_error(cache, "double free detected");
2848 else
2849 slab_error(cache, "memory outside object was overwritten");
2850
David Woodhouseb46b8f12007-05-08 00:22:59 -07002851 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002852 obj, redzone1, redzone2);
2853}
2854
Pekka Enberg343e0d72006-02-01 03:05:50 -08002855static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002856 void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857{
2858 struct page *page;
2859 unsigned int objnr;
2860 struct slab *slabp;
2861
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002862 objp -= obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 kfree_debugcheck(objp);
Christoph Lameterb49af682007-05-06 14:49:41 -07002864 page = virt_to_head_page(objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865
Pekka Enberg065d41c2005-11-13 16:06:46 -08002866 slabp = page_get_slab(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
2868 if (cachep->flags & SLAB_RED_ZONE) {
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002869 verify_redzone_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2871 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2872 }
2873 if (cachep->flags & SLAB_STORE_USER)
2874 *dbg_userword(cachep, objp) = caller;
2875
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002876 objnr = obj_to_index(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
2878 BUG_ON(objnr >= cachep->num);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002879 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880
Al Viro871751e2006-03-25 03:06:39 -08002881#ifdef CONFIG_DEBUG_SLAB_LEAK
2882 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2883#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 if (cachep->flags & SLAB_POISON) {
2885#ifdef CONFIG_DEBUG_PAGEALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -08002886 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 store_stackinfo(cachep, objp, (unsigned long)caller);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002888 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002889 cachep->buffer_size / PAGE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 } else {
2891 poison_obj(cachep, objp, POISON_FREE);
2892 }
2893#else
2894 poison_obj(cachep, objp, POISON_FREE);
2895#endif
2896 }
2897 return objp;
2898}
2899
Pekka Enberg343e0d72006-02-01 03:05:50 -08002900static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901{
2902 kmem_bufctl_t i;
2903 int entries = 0;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002904
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 /* Check slab's freelist to see if this obj is there. */
2906 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2907 entries++;
2908 if (entries > cachep->num || i >= cachep->num)
2909 goto bad;
2910 }
2911 if (entries != cachep->num - slabp->inuse) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002912bad:
2913 printk(KERN_ERR "slab: Internal list corruption detected in "
2914 "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2915 cachep->name, cachep->num, slabp, slabp->inuse);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002916 for (i = 0;
Linus Torvalds264132b2006-03-06 12:10:07 -08002917 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002918 i++) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002919 if (i % 16 == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 printk("\n%03x:", i);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002921 printk(" %02x", ((unsigned char *)slabp)[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 }
2923 printk("\n");
2924 BUG();
2925 }
2926}
2927#else
2928#define kfree_debugcheck(x) do { } while(0)
2929#define cache_free_debugcheck(x,objp,z) (objp)
2930#define check_slabp(x,y) do { } while(0)
2931#endif
2932
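/*
 * Refill the per-cpu array_cache: first try the node's shared array, then
 * pull objects from partial/free slabs, growing the cache if nothing is
 * available.
 */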
Pekka Enberg343e0d72006-02-01 03:05:50 -08002933static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934{
2935 int batchcount;
2936 struct kmem_list3 *l3;
2937 struct array_cache *ac;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002938 int node;
2939
2940 node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941
2942 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002943 ac = cpu_cache_get(cachep);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002944retry:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 batchcount = ac->batchcount;
2946 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002947 /*
2948 * If there was little recent activity on this cache, then
2949 * perform only a partial refill. Otherwise we could generate
2950 * refill bouncing.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 */
2952 batchcount = BATCHREFILL_LIMIT;
2953 }
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002954 l3 = cachep->nodelists[node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
Christoph Lametere498be72005-09-09 13:03:32 -07002956 BUG_ON(ac->avail > 0 || !l3);
2957 spin_lock(&l3->list_lock);
2958
Christoph Lameter3ded1752006-03-25 03:06:44 -08002959 /* See if we can refill from the shared array */
2960 if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2961 goto alloc_done;
2962
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 while (batchcount > 0) {
2964 struct list_head *entry;
2965 struct slab *slabp;
2966		/* Get the slab that the allocation is to come from. */
2967 entry = l3->slabs_partial.next;
2968 if (entry == &l3->slabs_partial) {
2969 l3->free_touched = 1;
2970 entry = l3->slabs_free.next;
2971 if (entry == &l3->slabs_free)
2972 goto must_grow;
2973 }
2974
2975 slabp = list_entry(entry, struct slab, list);
2976 check_slabp(cachep, slabp);
2977 check_spinlock_acquired(cachep);
Pekka Enberg714b8172007-05-06 14:49:03 -07002978
2979 /*
2980		 * The slab was on either the partial or the free list, so
2981 * there must be at least one object available for
2982 * allocation.
2983 */
2984 BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
2985
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 while (slabp->inuse < cachep->num && batchcount--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 STATS_INC_ALLOCED(cachep);
2988 STATS_INC_ACTIVE(cachep);
2989 STATS_SET_HIGH(cachep);
2990
Matthew Dobson78d382d2006-02-01 03:05:47 -08002991 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002992 node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 }
2994 check_slabp(cachep, slabp);
2995
2996 /* move slabp to correct slabp list: */
2997 list_del(&slabp->list);
2998 if (slabp->free == BUFCTL_END)
2999 list_add(&slabp->list, &l3->slabs_full);
3000 else
3001 list_add(&slabp->list, &l3->slabs_partial);
3002 }
3003
Andrew Mortona737b3e2006-03-22 00:08:11 -08003004must_grow:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 l3->free_objects -= ac->avail;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003006alloc_done:
Christoph Lametere498be72005-09-09 13:03:32 -07003007 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008
3009 if (unlikely(!ac->avail)) {
3010 int x;
Christoph Lameter3c517a62006-12-06 20:33:29 -08003011 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07003012
Andrew Mortona737b3e2006-03-22 00:08:11 -08003013 /* cache_grow can reenable interrupts, then ac could change. */
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003014 ac = cpu_cache_get(cachep);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003015 if (!x && ac->avail == 0) /* no objects in sight? abort */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 return NULL;
3017
Andrew Mortona737b3e2006-03-22 00:08:11 -08003018 if (!ac->avail) /* objects refilled by interrupt? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019 goto retry;
3020 }
3021 ac->touched = 1;
Christoph Lametere498be72005-09-09 13:03:32 -07003022 return ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023}
3024
Andrew Mortona737b3e2006-03-22 00:08:11 -08003025static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3026 gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027{
3028 might_sleep_if(flags & __GFP_WAIT);
3029#if DEBUG
3030 kmem_flagcheck(cachep, flags);
3031#endif
3032}
3033
3034#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003035static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3036 gfp_t flags, void *objp, void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003038 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 return objp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003040 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041#ifdef CONFIG_DEBUG_PAGEALLOC
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003042 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003043 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003044 cachep->buffer_size / PAGE_SIZE, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 else
3046 check_poison_obj(cachep, objp);
3047#else
3048 check_poison_obj(cachep, objp);
3049#endif
3050 poison_obj(cachep, objp, POISON_INUSE);
3051 }
3052 if (cachep->flags & SLAB_STORE_USER)
3053 *dbg_userword(cachep, objp) = caller;
3054
3055 if (cachep->flags & SLAB_RED_ZONE) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08003056 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3057 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3058 slab_error(cachep, "double free, or memory outside"
3059 " object was overwritten");
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003060 printk(KERN_ERR
David Woodhouseb46b8f12007-05-08 00:22:59 -07003061 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08003062 objp, *dbg_redzone1(cachep, objp),
3063 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 }
3065 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3066 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3067 }
Al Viro871751e2006-03-25 03:06:39 -08003068#ifdef CONFIG_DEBUG_SLAB_LEAK
3069 {
3070 struct slab *slabp;
3071 unsigned objnr;
3072
Christoph Lameterb49af682007-05-06 14:49:41 -07003073 slabp = page_get_slab(virt_to_head_page(objp));
Al Viro871751e2006-03-25 03:06:39 -08003074 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3075 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3076 }
3077#endif
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003078 objp += obj_offset(cachep);
Christoph Lameter4f104932007-05-06 14:50:17 -07003079 if (cachep->ctor && cachep->flags & SLAB_POISON)
Christoph Lameter4ba9b9d2007-10-16 23:25:51 -07003080 cachep->ctor(cachep, objp);
Kevin Hilmana44b56d2006-12-06 20:32:11 -08003081#if ARCH_SLAB_MINALIGN
3082 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3083 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3084 objp, ARCH_SLAB_MINALIGN);
3085 }
3086#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087 return objp;
3088}
3089#else
3090#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3091#endif
3092
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003093#ifdef CONFIG_FAILSLAB
3094
3095static struct failslab_attr {
3096
3097 struct fault_attr attr;
3098
3099 u32 ignore_gfp_wait;
3100#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3101 struct dentry *ignore_gfp_wait_file;
3102#endif
3103
3104} failslab = {
3105 .attr = FAULT_ATTR_INITIALIZER,
Don Mullis6b1b60f2006-12-08 02:39:53 -08003106 .ignore_gfp_wait = 1,
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003107};
3108
3109static int __init setup_failslab(char *str)
3110{
3111 return setup_fault_attr(&failslab.attr, str);
3112}
3113__setup("failslab=", setup_failslab);
3114
3115static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3116{
3117 if (cachep == &cache_cache)
3118 return 0;
3119 if (flags & __GFP_NOFAIL)
3120 return 0;
3121 if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
3122 return 0;
3123
3124 return should_fail(&failslab.attr, obj_size(cachep));
3125}
3126
3127#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3128
3129static int __init failslab_debugfs(void)
3130{
3131 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
3132 struct dentry *dir;
3133 int err;
3134
Akinobu Mita824ebef2007-05-06 14:49:58 -07003135 err = init_fault_attr_dentries(&failslab.attr, "failslab");
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003136 if (err)
3137 return err;
3138 dir = failslab.attr.dentries.dir;
3139
3140 failslab.ignore_gfp_wait_file =
3141 debugfs_create_bool("ignore-gfp-wait", mode, dir,
3142 &failslab.ignore_gfp_wait);
3143
3144 if (!failslab.ignore_gfp_wait_file) {
3145 err = -ENOMEM;
3146 debugfs_remove(failslab.ignore_gfp_wait_file);
3147 cleanup_fault_attr_dentries(&failslab.attr);
3148 }
3149
3150 return err;
3151}
3152
3153late_initcall(failslab_debugfs);
3154
3155#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3156
3157#else /* CONFIG_FAILSLAB */
3158
3159static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3160{
3161 return 0;
3162}
3163
3164#endif /* CONFIG_FAILSLAB */
3165
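/*
 * Fast path: allocate from the per-cpu array_cache, falling back to
 * cache_alloc_refill() when the array is empty.
 */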
Pekka Enberg343e0d72006-02-01 03:05:50 -08003166static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003168 void *objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 struct array_cache *ac;
3170
Alok N Kataria5c382302005-09-27 21:45:46 -07003171 check_irq_off();
Akinobu Mita8a8b6502006-12-08 02:39:44 -08003172
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003173 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 if (likely(ac->avail)) {
3175 STATS_INC_ALLOCHIT(cachep);
3176 ac->touched = 1;
Christoph Lametere498be72005-09-09 13:03:32 -07003177 objp = ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178 } else {
3179 STATS_INC_ALLOCMISS(cachep);
3180 objp = cache_alloc_refill(cachep, flags);
3181 }
Alok N Kataria5c382302005-09-27 21:45:46 -07003182 return objp;
3183}
3184
Christoph Lametere498be72005-09-09 13:03:32 -07003185#ifdef CONFIG_NUMA
3186/*
Paul Jacksonb2455392006-03-24 03:16:12 -08003187 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
Paul Jacksonc61afb12006-03-24 03:16:08 -08003188 *
3189 * If we are in_interrupt, then process context, including cpusets and
3190 * mempolicy, may not apply and should not be used for allocation policy.
3191 */
3192static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3193{
3194 int nid_alloc, nid_here;
3195
Christoph Lameter765c4502006-09-27 01:50:08 -07003196 if (in_interrupt() || (flags & __GFP_THISNODE))
Paul Jacksonc61afb12006-03-24 03:16:08 -08003197 return NULL;
3198 nid_alloc = nid_here = numa_node_id();
3199 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3200 nid_alloc = cpuset_mem_spread_node();
3201 else if (current->mempolicy)
3202 nid_alloc = slab_node(current->mempolicy);
3203 if (nid_alloc != nid_here)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003204 return ____cache_alloc_node(cachep, flags, nid_alloc);
Paul Jacksonc61afb12006-03-24 03:16:08 -08003205 return NULL;
3206}
3207
3208/*
Christoph Lameter765c4502006-09-27 01:50:08 -07003209 * Fallback function if there was no memory available and no objects on a
Christoph Lameter3c517a62006-12-06 20:33:29 -08003210 * certain node and fallback is permitted. First we scan all the
3211 * available nodelists for available objects. If that fails then we
3212 * perform an allocation without specifying a node. This allows the page
3213 * allocator to do its reclaim / fallback magic. We then insert the
3214 * slab into the proper nodelist and then allocate from it.
Christoph Lameter765c4502006-09-27 01:50:08 -07003215 */
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003216static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
Christoph Lameter765c4502006-09-27 01:50:08 -07003217{
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003218 struct zonelist *zonelist;
3219 gfp_t local_flags;
Christoph Lameter765c4502006-09-27 01:50:08 -07003220 struct zone **z;
3221 void *obj = NULL;
Christoph Lameter3c517a62006-12-06 20:33:29 -08003222 int nid;
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003223
3224 if (flags & __GFP_THISNODE)
3225 return NULL;
3226
3227 zonelist = &NODE_DATA(slab_node(current->mempolicy))
3228 ->node_zonelists[gfp_zone(flags)];
Christoph Lameter6cb06222007-10-16 01:25:41 -07003229 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Christoph Lameter765c4502006-09-27 01:50:08 -07003230
Christoph Lameter3c517a62006-12-06 20:33:29 -08003231retry:
3232 /*
3233 * Look through allowed nodes for objects available
3234 * from existing per node queues.
3235 */
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003236 for (z = zonelist->zones; *z && !obj; z++) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003237 nid = zone_to_nid(*z);
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003238
Paul Jackson02a0e532006-12-13 00:34:25 -08003239 if (cpuset_zone_allowed_hardwall(*z, flags) &&
Christoph Lameter3c517a62006-12-06 20:33:29 -08003240 cache->nodelists[nid] &&
3241 cache->nodelists[nid]->free_objects)
3242 obj = ____cache_alloc_node(cache,
3243 flags | GFP_THISNODE, nid);
3244 }
3245
Christoph Lametercfce6602007-05-06 14:50:17 -07003246 if (!obj) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003247 /*
3248 * This allocation will be performed within the constraints
3249 * of the current cpuset / memory policy requirements.
3250 * We may trigger various forms of reclaim on the allowed
3251 * set and go into memory reserves if necessary.
3252 */
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003253 if (local_flags & __GFP_WAIT)
3254 local_irq_enable();
3255 kmem_flagcheck(cache, flags);
Christoph Lameter3c517a62006-12-06 20:33:29 -08003256 obj = kmem_getpages(cache, flags, -1);
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003257 if (local_flags & __GFP_WAIT)
3258 local_irq_disable();
Christoph Lameter3c517a62006-12-06 20:33:29 -08003259 if (obj) {
3260 /*
3261 * Insert into the appropriate per node queues
3262 */
3263 nid = page_to_nid(virt_to_page(obj));
3264 if (cache_grow(cache, flags, nid, obj)) {
3265 obj = ____cache_alloc_node(cache,
3266 flags | GFP_THISNODE, nid);
3267 if (!obj)
3268 /*
3269 * Another processor may allocate the
3270 * objects in the slab since we are
3271 * not holding any locks.
3272 */
3273 goto retry;
3274 } else {
Hugh Dickinsb6a60452007-01-05 16:36:36 -08003275 /* cache_grow already freed obj */
Christoph Lameter3c517a62006-12-06 20:33:29 -08003276 obj = NULL;
3277 }
3278 }
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003279 }
Christoph Lameter765c4502006-09-27 01:50:08 -07003280 return obj;
3281}
3282
3283/*
Christoph Lametere498be72005-09-09 13:03:32 -07003284 * An interface to enable slab creation on nodeid
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003286static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003287 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07003288{
3289 struct list_head *entry;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003290 struct slab *slabp;
3291 struct kmem_list3 *l3;
3292 void *obj;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003293 int x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003295 l3 = cachep->nodelists[nodeid];
3296 BUG_ON(!l3);
Christoph Lametere498be72005-09-09 13:03:32 -07003297
Andrew Mortona737b3e2006-03-22 00:08:11 -08003298retry:
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08003299 check_irq_off();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003300 spin_lock(&l3->list_lock);
3301 entry = l3->slabs_partial.next;
3302 if (entry == &l3->slabs_partial) {
3303 l3->free_touched = 1;
3304 entry = l3->slabs_free.next;
3305 if (entry == &l3->slabs_free)
3306 goto must_grow;
3307 }
Christoph Lametere498be72005-09-09 13:03:32 -07003308
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003309 slabp = list_entry(entry, struct slab, list);
3310 check_spinlock_acquired_node(cachep, nodeid);
3311 check_slabp(cachep, slabp);
Christoph Lametere498be72005-09-09 13:03:32 -07003312
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003313 STATS_INC_NODEALLOCS(cachep);
3314 STATS_INC_ACTIVE(cachep);
3315 STATS_SET_HIGH(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003316
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003317 BUG_ON(slabp->inuse == cachep->num);
Christoph Lametere498be72005-09-09 13:03:32 -07003318
Matthew Dobson78d382d2006-02-01 03:05:47 -08003319 obj = slab_get_obj(cachep, slabp, nodeid);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003320 check_slabp(cachep, slabp);
3321 l3->free_objects--;
3322 /* move slabp to correct slabp list: */
3323 list_del(&slabp->list);
Christoph Lametere498be72005-09-09 13:03:32 -07003324
Andrew Mortona737b3e2006-03-22 00:08:11 -08003325 if (slabp->free == BUFCTL_END)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003326 list_add(&slabp->list, &l3->slabs_full);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003327 else
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003328 list_add(&slabp->list, &l3->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07003329
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003330 spin_unlock(&l3->list_lock);
3331 goto done;
Christoph Lametere498be72005-09-09 13:03:32 -07003332
Andrew Mortona737b3e2006-03-22 00:08:11 -08003333must_grow:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003334 spin_unlock(&l3->list_lock);
Christoph Lameter3c517a62006-12-06 20:33:29 -08003335 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
Christoph Lameter765c4502006-09-27 01:50:08 -07003336 if (x)
3337 goto retry;
Christoph Lametere498be72005-09-09 13:03:32 -07003338
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003339 return fallback_alloc(cachep, flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07003340
Andrew Mortona737b3e2006-03-22 00:08:11 -08003341done:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003342 return obj;
Christoph Lametere498be72005-09-09 13:03:32 -07003343}
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003344
3345/**
3346 * kmem_cache_alloc_node - Allocate an object on the specified node
3347 * @cachep: The cache to allocate from.
3348 * @flags: See kmalloc().
3349 * @nodeid: node number of the target node.
3350 * @caller: return address of caller, used for debug information
3351 *
3352 * Identical to kmem_cache_alloc but it will allocate memory on the given
3353 * node, which can improve the performance for cpu bound structures.
3354 *
3355 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
3356 */
3357static __always_inline void *
3358__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3359 void *caller)
3360{
3361 unsigned long save_flags;
3362 void *ptr;
3363
Akinobu Mita824ebef2007-05-06 14:49:58 -07003364 if (should_failslab(cachep, flags))
3365 return NULL;
3366
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003367 cache_alloc_debugcheck_before(cachep, flags);
3368 local_irq_save(save_flags);
3369
3370 if (unlikely(nodeid == -1))
3371 nodeid = numa_node_id();
3372
3373 if (unlikely(!cachep->nodelists[nodeid])) {
3374 /* Node not bootstrapped yet */
3375 ptr = fallback_alloc(cachep, flags);
3376 goto out;
3377 }
3378
3379 if (nodeid == numa_node_id()) {
3380 /*
3381 * Use the locally cached objects if possible.
3382 * However ____cache_alloc does not allow fallback
3383 * to other nodes. It may fail while we still have
3384 * objects on other nodes available.
3385 */
3386 ptr = ____cache_alloc(cachep, flags);
3387 if (ptr)
3388 goto out;
3389 }
3390	/* ____cache_alloc_node can fall back to other nodes */
3391 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3392 out:
3393 local_irq_restore(save_flags);
3394 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3395
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003396 if (unlikely((flags & __GFP_ZERO) && ptr))
3397 memset(ptr, 0, obj_size(cachep));
3398
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003399 return ptr;
3400}
3401
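/*
 * NUMA-aware allocation: honour cpuset/mempolicy spreading first, then try
 * the local node, and finally let ____cache_alloc_node() look elsewhere.
 */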
3402static __always_inline void *
3403__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3404{
3405 void *objp;
3406
3407 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3408 objp = alternate_node_alloc(cache, flags);
3409 if (objp)
3410 goto out;
3411 }
3412 objp = ____cache_alloc(cache, flags);
3413
3414 /*
3415 * We may just have run out of memory on the local node.
3416 * ____cache_alloc_node() knows how to locate memory on other nodes
3417 */
3418 if (!objp)
3419 objp = ____cache_alloc_node(cache, flags, numa_node_id());
3420
3421 out:
3422 return objp;
3423}
3424#else
3425
3426static __always_inline void *
3427__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3428{
3429 return ____cache_alloc(cachep, flags);
3430}
3431
3432#endif /* CONFIG_NUMA */
3433
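/*
 * Common entry point for node-agnostic allocations: debug checks, fault
 * injection, then __do_cache_alloc() with local interrupts disabled.
 */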
3434static __always_inline void *
3435__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3436{
3437 unsigned long save_flags;
3438 void *objp;
3439
Akinobu Mita824ebef2007-05-06 14:49:58 -07003440 if (should_failslab(cachep, flags))
3441 return NULL;
3442
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003443 cache_alloc_debugcheck_before(cachep, flags);
3444 local_irq_save(save_flags);
3445 objp = __do_cache_alloc(cachep, flags);
3446 local_irq_restore(save_flags);
3447 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3448 prefetchw(objp);
3449
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003450 if (unlikely((flags & __GFP_ZERO) && objp))
3451 memset(objp, 0, obj_size(cachep));
3452
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003453 return objp;
3454}
Christoph Lametere498be72005-09-09 13:03:32 -07003455
3456/*
3457 * Caller needs to acquire the correct kmem_list3's list_lock
3458 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003459static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003460 int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461{
3462 int i;
Christoph Lametere498be72005-09-09 13:03:32 -07003463 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464
3465 for (i = 0; i < nr_objects; i++) {
3466 void *objp = objpp[i];
3467 struct slab *slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08003469 slabp = virt_to_slab(objp);
Christoph Lameterff694162005-09-22 21:44:02 -07003470 l3 = cachep->nodelists[node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 list_del(&slabp->list);
Christoph Lameterff694162005-09-22 21:44:02 -07003472 check_spinlock_acquired_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 check_slabp(cachep, slabp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08003474 slab_put_obj(cachep, slabp, objp, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 STATS_DEC_ACTIVE(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003476 l3->free_objects++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 check_slabp(cachep, slabp);
3478
3479 /* fixup slab chains */
3480 if (slabp->inuse == 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07003481 if (l3->free_objects > l3->free_limit) {
3482 l3->free_objects -= cachep->num;
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07003483 /* No need to drop any previously held
3484				 * lock here; even if we have an off-slab slab
3485				 * descriptor, it is guaranteed to come from
3486				 * a different cache. Refer to the comments before
3487 * alloc_slabmgmt.
3488 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489 slab_destroy(cachep, slabp);
3490 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07003491 list_add(&slabp->list, &l3->slabs_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 }
3493 } else {
3494 /* Unconditionally move a slab to the end of the
3495 * partial list on free - maximum time for the
3496 * other objects to be freed, too.
3497 */
Christoph Lametere498be72005-09-09 13:03:32 -07003498 list_add_tail(&slabp->list, &l3->slabs_partial);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 }
3500 }
3501}
3502
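/*
 * Flush batchcount objects out of the per-cpu array_cache: move them into
 * the node's shared array if there is room, otherwise give them back to
 * the slabs via free_block().
 */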
Pekka Enberg343e0d72006-02-01 03:05:50 -08003503static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504{
3505 int batchcount;
Christoph Lametere498be72005-09-09 13:03:32 -07003506 struct kmem_list3 *l3;
Christoph Lameterff694162005-09-22 21:44:02 -07003507 int node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508
3509 batchcount = ac->batchcount;
3510#if DEBUG
3511 BUG_ON(!batchcount || batchcount > ac->avail);
3512#endif
3513 check_irq_off();
Christoph Lameterff694162005-09-22 21:44:02 -07003514 l3 = cachep->nodelists[node];
Ingo Molnar873623d2006-07-13 14:44:38 +02003515 spin_lock(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07003516 if (l3->shared) {
3517 struct array_cache *shared_array = l3->shared;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003518 int max = shared_array->limit - shared_array->avail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 if (max) {
3520 if (batchcount > max)
3521 batchcount = max;
Christoph Lametere498be72005-09-09 13:03:32 -07003522 memcpy(&(shared_array->entry[shared_array->avail]),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003523 ac->entry, sizeof(void *) * batchcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 shared_array->avail += batchcount;
3525 goto free_done;
3526 }
3527 }
3528
Christoph Lameterff694162005-09-22 21:44:02 -07003529 free_block(cachep, ac->entry, batchcount, node);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003530free_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531#if STATS
3532 {
3533 int i = 0;
3534 struct list_head *p;
3535
Christoph Lametere498be72005-09-09 13:03:32 -07003536 p = l3->slabs_free.next;
3537 while (p != &(l3->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 struct slab *slabp;
3539
3540 slabp = list_entry(p, struct slab, list);
3541 BUG_ON(slabp->inuse);
3542
3543 i++;
3544 p = p->next;
3545 }
3546 STATS_SET_FREEABLE(cachep, i);
3547 }
3548#endif
Christoph Lametere498be72005-09-09 13:03:32 -07003549 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550 ac->avail -= batchcount;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003551 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552}
3553
3554/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08003555 * Release an obj back to its cache. If the obj has a constructed state, it must
3556 * be in this state _before_ it is released. Called with disabled ints.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 */
Ingo Molnar873623d2006-07-13 14:44:38 +02003558static inline void __cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559{
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003560 struct array_cache *ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561
3562 check_irq_off();
3563 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3564
Siddha, Suresh B1807a1a2007-08-22 14:01:49 -07003565 /*
3566 * Skip calling cache_free_alien() when the platform is not numa.
3567 * This will avoid cache misses that happen while accessing slabp (which
3568	 * is a per-page memory reference) to get nodeid. Instead use a global
3569	 * variable to skip the call, which is most likely to be present in
3570 * the cache.
3571 */
3572 if (numa_platform && cache_free_alien(cachep, objp))
Pekka Enberg729bd0b2006-06-23 02:03:05 -07003573 return;
Christoph Lametere498be72005-09-09 13:03:32 -07003574
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 if (likely(ac->avail < ac->limit)) {
3576 STATS_INC_FREEHIT(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003577 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 return;
3579 } else {
3580 STATS_INC_FREEMISS(cachep);
3581 cache_flusharray(cachep, ac);
Christoph Lametere498be72005-09-09 13:03:32 -07003582 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 }
3584}
3585
3586/**
3587 * kmem_cache_alloc - Allocate an object
3588 * @cachep: The cache to allocate from.
3589 * @flags: See kmalloc().
3590 *
3591 * Allocate an object from this cache. The flags are only relevant
3592 * if the cache has no available objects.
3593 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003594void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595{
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003596 return __cache_alloc(cachep, flags, __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597}
3598EXPORT_SYMBOL(kmem_cache_alloc);
3599
3600/**
3601 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry
3602 *
3603 * @cachep: the cache we're checking against
3604 * @ptr: pointer to validate
3605 *
3606 * This verifies that the untrusted pointer looks sane:
3607 * it is _not_ a guarantee that the pointer is actually
3608 * part of the slab cache in question, but it at least
3609 * validates that the pointer can be dereferenced and
3610 * looks half-way sane.
3611 *
3612 * Currently only used for dentry validation.
3613 */
Christoph Lameterb7f869a2006-12-22 01:06:44 -08003614int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003616 unsigned long addr = (unsigned long)ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 unsigned long min_addr = PAGE_OFFSET;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003618 unsigned long align_mask = BYTES_PER_WORD - 1;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003619 unsigned long size = cachep->buffer_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 struct page *page;
3621
3622 if (unlikely(addr < min_addr))
3623 goto out;
3624 if (unlikely(addr > (unsigned long)high_memory - size))
3625 goto out;
3626 if (unlikely(addr & align_mask))
3627 goto out;
3628 if (unlikely(!kern_addr_valid(addr)))
3629 goto out;
3630 if (unlikely(!kern_addr_valid(addr + size - 1)))
3631 goto out;
3632 page = virt_to_page(ptr);
3633 if (unlikely(!PageSlab(page)))
3634 goto out;
Pekka Enberg065d41c2005-11-13 16:06:46 -08003635 if (unlikely(page_get_cache(page) != cachep))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 goto out;
3637 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003638out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 return 0;
3640}
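
/*
 * Illustrative sketch (not part of the original file): how a caller could
 * use kmem_ptr_validate() to screen an untrusted pointer before touching
 * it.  The helper name is made up; remember the check is a heuristic, not
 * a guarantee that the pointer really belongs to @cachep.
 */
#if 0
#include <linux/slab.h>
#include <linux/errno.h>

static int foo_check_and_use(struct kmem_cache *cachep, void *ptr)
{
	if (!kmem_ptr_validate(cachep, ptr))
		return -EINVAL;	/* does not even look like a slab object */

	/* ptr can now be dereferenced with reasonable, not absolute, confidence */
	return 0;
}
#endif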
3641
3642#ifdef CONFIG_NUMA
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003643void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3644{
3645 return __cache_alloc_node(cachep, flags, nodeid,
3646 __builtin_return_address(0));
3647}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648EXPORT_SYMBOL(kmem_cache_alloc_node);
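
/*
 * Illustrative sketch (not part of the original file): allocating one
 * object backed by memory local to each online node.  The helper and the
 * objs[] array are made up; the point is that @nodeid selects which
 * node's lists back the allocation.
 */
#if 0
#include <linux/slab.h>
#include <linux/nodemask.h>
#include <linux/errno.h>

static int foo_alloc_per_node(struct kmem_cache *cachep, void **objs)
{
	int node;

	for_each_online_node(node) {
		objs[node] = kmem_cache_alloc_node(cachep, GFP_KERNEL, node);
		if (!objs[node])
			return -ENOMEM;
	}
	return 0;
}
#endif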
3649
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003650static __always_inline void *
3651__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003652{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003653 struct kmem_cache *cachep;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003654
3655 cachep = kmem_find_general_cachep(size, flags);
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003656 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3657 return cachep;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003658 return kmem_cache_alloc_node(cachep, flags, node);
3659}
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003660
3661#ifdef CONFIG_DEBUG_SLAB
3662void *__kmalloc_node(size_t size, gfp_t flags, int node)
3663{
3664 return __do_kmalloc_node(size, flags, node,
3665 __builtin_return_address(0));
3666}
Christoph Hellwigdbe5e692006-09-25 23:31:36 -07003667EXPORT_SYMBOL(__kmalloc_node);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003668
3669void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3670 int node, void *caller)
3671{
3672 return __do_kmalloc_node(size, flags, node, caller);
3673}
3674EXPORT_SYMBOL(__kmalloc_node_track_caller);
3675#else
3676void *__kmalloc_node(size_t size, gfp_t flags, int node)
3677{
3678 return __do_kmalloc_node(size, flags, node, NULL);
3679}
3680EXPORT_SYMBOL(__kmalloc_node);
3681#endif /* CONFIG_DEBUG_SLAB */
3682#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
3684/**
Paul Drynoff800590f2006-06-23 02:03:48 -07003685 * __do_kmalloc - allocate memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 * @size: how many bytes of memory are required.
Paul Drynoff800590f2006-06-23 02:03:48 -07003687 * @flags: the type of memory to allocate (see kmalloc).
Randy Dunlap911851e2006-03-22 00:08:14 -08003688 * @caller: function caller for debug tracking of the caller
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689 */
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003690static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3691 void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003693 struct kmem_cache *cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003695 /* If you want to save a few bytes of .text space: replace
3696 * __ with kmem_.
3697 * Then kmalloc uses the uninlined functions instead of the inline
3698 * functions.
3699 */
3700 cachep = __find_general_cachep(size, flags);
Linus Torvaldsa5c96d82007-07-19 13:17:15 -07003701 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3702 return cachep;
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003703 return __cache_alloc(cachep, flags, caller);
3704}
3705
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003706
Christoph Hellwig1d2c8ee2006-10-04 02:15:25 -07003707#ifdef CONFIG_DEBUG_SLAB
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003708void *__kmalloc(size_t size, gfp_t flags)
3709{
Al Viro871751e2006-03-25 03:06:39 -08003710 return __do_kmalloc(size, flags, __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711}
3712EXPORT_SYMBOL(__kmalloc);
3713
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003714void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3715{
3716 return __do_kmalloc(size, flags, caller);
3717}
3718EXPORT_SYMBOL(__kmalloc_track_caller);
Christoph Hellwig1d2c8ee2006-10-04 02:15:25 -07003719
3720#else
3721void *__kmalloc(size_t size, gfp_t flags)
3722{
3723 return __do_kmalloc(size, flags, NULL);
3724}
3725EXPORT_SYMBOL(__kmalloc);
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003726#endif
3727
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728/**
3729 * kmem_cache_free - Deallocate an object
3730 * @cachep: The cache the allocation was from.
3731 * @objp: The previously allocated object.
3732 *
3733 * Free an object which was previously allocated from this
3734 * cache.
3735 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003736void kmem_cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737{
3738 unsigned long flags;
3739
Pekka Enbergddc2e812006-06-23 02:03:40 -07003740 BUG_ON(virt_to_cache(objp) != cachep);
3741
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 local_irq_save(flags);
Ingo Molnar898552c2007-02-10 01:44:57 -08003743 debug_check_no_locks_freed(objp, obj_size(cachep));
Ingo Molnar873623d2006-07-13 14:44:38 +02003744 __cache_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 local_irq_restore(flags);
3746}
3747EXPORT_SYMBOL(kmem_cache_free);
3748
3749/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750 * kfree - free previously allocated memory
3751 * @objp: pointer returned by kmalloc.
3752 *
Pekka Enberg80e93ef2005-09-09 13:10:16 -07003753 * If @objp is NULL, no operation is performed.
3754 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 * Don't free memory not originally allocated by kmalloc()
3756 * or you will run into trouble.
3757 */
3758void kfree(const void *objp)
3759{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003760 struct kmem_cache *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761 unsigned long flags;
3762
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003763 if (unlikely(ZERO_OR_NULL_PTR(objp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 return;
3765 local_irq_save(flags);
3766 kfree_debugcheck(objp);
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08003767 c = virt_to_cache(objp);
Ingo Molnarf9b84042006-06-27 02:54:49 -07003768 debug_check_no_locks_freed(objp, obj_size(c));
Ingo Molnar873623d2006-07-13 14:44:38 +02003769 __cache_free(c, (void *)objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 local_irq_restore(flags);
3771}
3772EXPORT_SYMBOL(kfree);
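
/*
 * Illustrative sketch (not part of the original file): the usual
 * kmalloc()/kfree() pairing.  Note that kfree(NULL) is explicitly a
 * no-op, which allows unconditional cleanup paths.
 */
#if 0
#include <linux/slab.h>
#include <linux/errno.h>

static int foo_buffer_example(void)
{
	char *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use the buffer ... */
	kfree(buf);	/* must have come from kmalloc()/kmem_cache_alloc() */
	kfree(NULL);	/* allowed: no operation is performed */
	return 0;
}
#endif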
3773
Pekka Enberg343e0d72006-02-01 03:05:50 -08003774unsigned int kmem_cache_size(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003776 return obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777}
3778EXPORT_SYMBOL(kmem_cache_size);
3779
Pekka Enberg343e0d72006-02-01 03:05:50 -08003780const char *kmem_cache_name(struct kmem_cache *cachep)
Arnaldo Carvalho de Melo19449722005-06-18 22:46:19 -07003781{
3782 return cachep->name;
3783}
3784EXPORT_SYMBOL_GPL(kmem_cache_name);
3785
Christoph Lametere498be72005-09-09 13:03:32 -07003786/*
Christoph Lameter0718dc22006-03-25 03:06:47 -08003787 * This initializes kmem_list3 or resizes various caches for all nodes.
Christoph Lametere498be72005-09-09 13:03:32 -07003788 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003789static int alloc_kmemlist(struct kmem_cache *cachep)
Christoph Lametere498be72005-09-09 13:03:32 -07003790{
3791 int node;
3792 struct kmem_list3 *l3;
Christoph Lametercafeb022006-03-25 03:06:46 -08003793 struct array_cache *new_shared;
Paul Menage3395ee02006-12-06 20:32:16 -08003794 struct array_cache **new_alien = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003795
Christoph Lameter04231b32007-10-16 01:25:32 -07003796 for_each_node_state(node, N_NORMAL_MEMORY) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003797
Paul Menage3395ee02006-12-06 20:32:16 -08003798 if (use_alien_caches) {
3799 new_alien = alloc_alien_cache(node, cachep->limit);
3800 if (!new_alien)
3801 goto fail;
3802 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003803
Eric Dumazet63109842007-05-06 14:49:28 -07003804 new_shared = NULL;
3805 if (cachep->shared) {
3806 new_shared = alloc_arraycache(node,
Christoph Lameter0718dc22006-03-25 03:06:47 -08003807 cachep->shared*cachep->batchcount,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003808 0xbaadf00d);
Eric Dumazet63109842007-05-06 14:49:28 -07003809 if (!new_shared) {
3810 free_alien_cache(new_alien);
3811 goto fail;
3812 }
Christoph Lameter0718dc22006-03-25 03:06:47 -08003813 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003814
Andrew Mortona737b3e2006-03-22 00:08:11 -08003815 l3 = cachep->nodelists[node];
3816 if (l3) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003817 struct array_cache *shared = l3->shared;
3818
Christoph Lametere498be72005-09-09 13:03:32 -07003819 spin_lock_irq(&l3->list_lock);
3820
Christoph Lametercafeb022006-03-25 03:06:46 -08003821 if (shared)
Christoph Lameter0718dc22006-03-25 03:06:47 -08003822 free_block(cachep, shared->entry,
3823 shared->avail, node);
Christoph Lametere498be72005-09-09 13:03:32 -07003824
Christoph Lametercafeb022006-03-25 03:06:46 -08003825 l3->shared = new_shared;
3826 if (!l3->alien) {
Christoph Lametere498be72005-09-09 13:03:32 -07003827 l3->alien = new_alien;
3828 new_alien = NULL;
3829 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003830 l3->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003831 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003832 spin_unlock_irq(&l3->list_lock);
Christoph Lametercafeb022006-03-25 03:06:46 -08003833 kfree(shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003834 free_alien_cache(new_alien);
3835 continue;
3836 }
Andrew Mortona737b3e2006-03-22 00:08:11 -08003837 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
Christoph Lameter0718dc22006-03-25 03:06:47 -08003838 if (!l3) {
3839 free_alien_cache(new_alien);
3840 kfree(new_shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003841 goto fail;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003842 }
Christoph Lametere498be72005-09-09 13:03:32 -07003843
3844 kmem_list3_init(l3);
3845 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
Andrew Mortona737b3e2006-03-22 00:08:11 -08003846 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
Christoph Lametercafeb022006-03-25 03:06:46 -08003847 l3->shared = new_shared;
Christoph Lametere498be72005-09-09 13:03:32 -07003848 l3->alien = new_alien;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003849 l3->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003850 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003851 cachep->nodelists[node] = l3;
3852 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003853 return 0;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003854
Andrew Mortona737b3e2006-03-22 00:08:11 -08003855fail:
Christoph Lameter0718dc22006-03-25 03:06:47 -08003856 if (!cachep->next.next) {
3857 /* Cache is not active yet. Roll back what we did */
3858 node--;
3859 while (node >= 0) {
3860 if (cachep->nodelists[node]) {
3861 l3 = cachep->nodelists[node];
3862
3863 kfree(l3->shared);
3864 free_alien_cache(l3->alien);
3865 kfree(l3);
3866 cachep->nodelists[node] = NULL;
3867 }
3868 node--;
3869 }
3870 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003871 return -ENOMEM;
Christoph Lametere498be72005-09-09 13:03:32 -07003872}
3873
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874struct ccupdate_struct {
Pekka Enberg343e0d72006-02-01 03:05:50 -08003875 struct kmem_cache *cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 struct array_cache *new[NR_CPUS];
3877};
3878
3879static void do_ccupdate_local(void *info)
3880{
Andrew Mortona737b3e2006-03-22 00:08:11 -08003881 struct ccupdate_struct *new = info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 struct array_cache *old;
3883
3884 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003885 old = cpu_cache_get(new->cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003886
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3888 new->new[smp_processor_id()] = old;
3889}
3890
Ravikiran G Thirumalaib5d8ca72006-03-22 00:08:12 -08003891/* Always called with the cache_chain_mutex held */
Andrew Mortona737b3e2006-03-22 00:08:11 -08003892static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3893 int batchcount, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894{
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003895 struct ccupdate_struct *new;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003896 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003898 new = kzalloc(sizeof(*new), GFP_KERNEL);
3899 if (!new)
3900 return -ENOMEM;
3901
Christoph Lametere498be72005-09-09 13:03:32 -07003902 for_each_online_cpu(i) {
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003903 new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003904 batchcount);
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003905 if (!new->new[i]) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003906 for (i--; i >= 0; i--)
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003907 kfree(new->new[i]);
3908 kfree(new);
Christoph Lametere498be72005-09-09 13:03:32 -07003909 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 }
3911 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003912 new->cachep = cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003914 on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
Christoph Lametere498be72005-09-09 13:03:32 -07003915
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 check_irq_on();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 cachep->batchcount = batchcount;
3918 cachep->limit = limit;
Christoph Lametere498be72005-09-09 13:03:32 -07003919 cachep->shared = shared;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920
Christoph Lametere498be72005-09-09 13:03:32 -07003921 for_each_online_cpu(i) {
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003922 struct array_cache *ccold = new->new[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 if (!ccold)
3924 continue;
Christoph Lametere498be72005-09-09 13:03:32 -07003925 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
Christoph Lameterff694162005-09-22 21:44:02 -07003926 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
Christoph Lametere498be72005-09-09 13:03:32 -07003927 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 kfree(ccold);
3929 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003930 kfree(new);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003931 return alloc_kmemlist(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932}
3933
Ravikiran G Thirumalaib5d8ca72006-03-22 00:08:12 -08003934/* Called with cache_chain_mutex held always */
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003935static int enable_cpucache(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936{
3937 int err;
3938 int limit, shared;
3939
Andrew Mortona737b3e2006-03-22 00:08:11 -08003940 /*
3941 * The head array serves three purposes:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 * - create a LIFO ordering, i.e. return objects that are cache-warm
3943 * - reduce the number of spinlock operations.
Andrew Mortona737b3e2006-03-22 00:08:11 -08003944 * - reduce the number of linked list operations on the slab and
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 * bufctl chains: array operations are cheaper.
3946 * The numbers are guessed, we should auto-tune as described by
3947 * Bonwick.
3948 */
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003949 if (cachep->buffer_size > 131072)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 limit = 1;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003951 else if (cachep->buffer_size > PAGE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 limit = 8;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003953 else if (cachep->buffer_size > 1024)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 limit = 24;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003955 else if (cachep->buffer_size > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 limit = 54;
3957 else
3958 limit = 120;
3959
Andrew Mortona737b3e2006-03-22 00:08:11 -08003960 /*
3961 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 * allocation behaviour: Most allocs on one cpu, most free operations
3963 * on another cpu. For these cases, an efficient object passing between
3964 * cpus is necessary. This is provided by a shared array. The array
3965 * replaces Bonwick's magazine layer.
3966 * On uniprocessor, it's functionally equivalent (but less efficient)
3967 * to a larger limit. Thus disabled by default.
3968 */
3969 shared = 0;
Eric Dumazet364fbb22007-05-06 14:49:27 -07003970 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 shared = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972
3973#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003974 /*
3975 * With debugging enabled, large batchcounts lead to excessively long
3976 * periods with local interrupts disabled. Limit the batchcount.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 */
3978 if (limit > 32)
3979 limit = 32;
3980#endif
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003981 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982 if (err)
3983 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003984 cachep->name, -err);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003985 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986}
3987
Christoph Lameter1b552532006-03-22 00:09:07 -08003988/*
3989 * Drain an array if it contains any elements taking the l3 lock only if
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003990 * necessary. Note that the l3 listlock also protects the array_cache
3991 * if drain_array() is used on the shared array.
Christoph Lameter1b552532006-03-22 00:09:07 -08003992 */
3993void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3994 struct array_cache *ac, int force, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995{
3996 int tofree;
3997
Christoph Lameter1b552532006-03-22 00:09:07 -08003998 if (!ac || !ac->avail)
3999 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 if (ac->touched && !force) {
4001 ac->touched = 0;
Christoph Lameterb18e7e62006-03-22 00:09:07 -08004002 } else {
Christoph Lameter1b552532006-03-22 00:09:07 -08004003 spin_lock_irq(&l3->list_lock);
Christoph Lameterb18e7e62006-03-22 00:09:07 -08004004 if (ac->avail) {
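			/* free everything if forced, else about a fifth of the limit */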
4005 tofree = force ? ac->avail : (ac->limit + 4) / 5;
4006 if (tofree > ac->avail)
4007 tofree = (ac->avail + 1) / 2;
4008 free_block(cachep, ac->entry, tofree, node);
4009 ac->avail -= tofree;
4010 memmove(ac->entry, &(ac->entry[tofree]),
4011 sizeof(void *) * ac->avail);
4012 }
Christoph Lameter1b552532006-03-22 00:09:07 -08004013 spin_unlock_irq(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 }
4015}
4016
4017/**
4018 * cache_reap - Reclaim memory from caches.
Randy Dunlap05fb6bf2007-02-28 20:12:13 -08004019 * @w: work descriptor
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020 *
4021 * Called from workqueue/eventd every few seconds.
4022 * Purpose:
4023 * - clear the per-cpu caches for this CPU.
4024 * - return freeable pages to the main free memory pool.
4025 *
Andrew Mortona737b3e2006-03-22 00:08:11 -08004026 * If we cannot acquire the cache chain mutex then just give up - we'll try
4027 * again on the next iteration.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004029static void cache_reap(struct work_struct *w)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030{
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004031 struct kmem_cache *searchp;
Christoph Lametere498be72005-09-09 13:03:32 -07004032 struct kmem_list3 *l3;
Christoph Lameteraab22072006-03-22 00:09:06 -08004033 int node = numa_node_id();
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004034 struct delayed_work *work =
4035 container_of(w, struct delayed_work, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004037 if (!mutex_trylock(&cache_chain_mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 /* Give up. Setup the next iteration. */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004039 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004041 list_for_each_entry(searchp, &cache_chain, next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 check_irq_on();
4043
Christoph Lameter35386e32006-03-22 00:09:05 -08004044 /*
4045 * We only take the l3 lock if absolutely necessary and we
4046 * have established with reasonable certainty that
4047 * we can do some work if the lock was obtained.
4048 */
Christoph Lameteraab22072006-03-22 00:09:06 -08004049 l3 = searchp->nodelists[node];
Christoph Lameter35386e32006-03-22 00:09:05 -08004050
Christoph Lameter8fce4d82006-03-09 17:33:54 -08004051 reap_alien(searchp, l3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052
Christoph Lameteraab22072006-03-22 00:09:06 -08004053 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054
Christoph Lameter35386e32006-03-22 00:09:05 -08004055 /*
4056 * These are racy checks but it does not matter
4057 * if we skip one check or scan twice.
4058 */
Christoph Lametere498be72005-09-09 13:03:32 -07004059 if (time_after(l3->next_reap, jiffies))
Christoph Lameter35386e32006-03-22 00:09:05 -08004060 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061
Christoph Lametere498be72005-09-09 13:03:32 -07004062 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063
Christoph Lameteraab22072006-03-22 00:09:06 -08004064 drain_array(searchp, l3, l3->shared, 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065
Christoph Lametered11d9e2006-06-30 01:55:45 -07004066 if (l3->free_touched)
Christoph Lametere498be72005-09-09 13:03:32 -07004067 l3->free_touched = 0;
Christoph Lametered11d9e2006-06-30 01:55:45 -07004068 else {
4069 int freed;
4070
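			/*
			 * Free roughly a fifth of free_limit, converted to
			 * whole slabs and rounded up.
			 */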
4071 freed = drain_freelist(searchp, l3, (l3->free_limit +
4072 5 * searchp->num - 1) / (5 * searchp->num));
4073 STATS_ADD_REAPED(searchp, freed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 }
Christoph Lameter35386e32006-03-22 00:09:05 -08004075next:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 cond_resched();
4077 }
4078 check_irq_on();
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004079 mutex_unlock(&cache_chain_mutex);
Christoph Lameter8fce4d82006-03-09 17:33:54 -08004080 next_reap_node();
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004081out:
Andrew Mortona737b3e2006-03-22 00:08:11 -08004082 /* Set up the next iteration */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08004083 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084}
4085
4086#ifdef CONFIG_PROC_FS
4087
Pekka Enberg85289f92006-01-08 01:00:36 -08004088static void print_slabinfo_header(struct seq_file *m)
4089{
4090 /*
4091 * Output format version, so at least we can change it
4092 * without _too_ many complaints.
4093 */
4094#if STATS
4095 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4096#else
4097 seq_puts(m, "slabinfo - version: 2.1\n");
4098#endif
4099 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
4100 "<objperslab> <pagesperslab>");
4101 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4102 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4103#if STATS
4104 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004105 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
Pekka Enberg85289f92006-01-08 01:00:36 -08004106 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4107#endif
4108 seq_putc(m, '\n');
4109}
4110
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111static void *s_start(struct seq_file *m, loff_t *pos)
4112{
4113 loff_t n = *pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004115 mutex_lock(&cache_chain_mutex);
Pekka Enberg85289f92006-01-08 01:00:36 -08004116 if (!n)
4117 print_slabinfo_header(m);
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004118
4119 return seq_list_start(&cache_chain, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120}
4121
4122static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4123{
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004124 return seq_list_next(p, &cache_chain, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125}
4126
4127static void s_stop(struct seq_file *m, void *p)
4128{
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004129 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130}
4131
4132static int s_show(struct seq_file *m, void *p)
4133{
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004134 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004135 struct slab *slabp;
4136 unsigned long active_objs;
4137 unsigned long num_objs;
4138 unsigned long active_slabs = 0;
4139 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07004140 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141 char *error = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07004142 int node;
4143 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 active_objs = 0;
4146 num_slabs = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07004147 for_each_online_node(node) {
4148 l3 = cachep->nodelists[node];
4149 if (!l3)
4150 continue;
4151
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08004152 check_irq_on();
4153 spin_lock_irq(&l3->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07004154
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004155 list_for_each_entry(slabp, &l3->slabs_full, list) {
Christoph Lametere498be72005-09-09 13:03:32 -07004156 if (slabp->inuse != cachep->num && !error)
4157 error = "slabs_full accounting error";
4158 active_objs += cachep->num;
4159 active_slabs++;
4160 }
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004161 list_for_each_entry(slabp, &l3->slabs_partial, list) {
Christoph Lametere498be72005-09-09 13:03:32 -07004162 if (slabp->inuse == cachep->num && !error)
4163 error = "slabs_partial inuse accounting error";
4164 if (!slabp->inuse && !error)
4165 error = "slabs_partial/inuse accounting error";
4166 active_objs += slabp->inuse;
4167 active_slabs++;
4168 }
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004169 list_for_each_entry(slabp, &l3->slabs_free, list) {
Christoph Lametere498be72005-09-09 13:03:32 -07004170 if (slabp->inuse && !error)
4171 error = "slabs_free/inuse accounting error";
4172 num_slabs++;
4173 }
4174 free_objects += l3->free_objects;
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08004175 if (l3->shared)
4176 shared_avail += l3->shared->avail;
Christoph Lametere498be72005-09-09 13:03:32 -07004177
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08004178 spin_unlock_irq(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004180 num_slabs += active_slabs;
4181 num_objs = num_slabs * cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07004182 if (num_objs - active_objs != free_objects && !error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 error = "free_objects accounting error";
4184
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004185 name = cachep->name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 if (error)
4187 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4188
4189 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
Manfred Spraul3dafccf2006-02-01 03:05:42 -08004190 name, active_objs, num_objs, cachep->buffer_size,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004191 cachep->num, (1 << cachep->gfporder));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 seq_printf(m, " : tunables %4u %4u %4u",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004193 cachep->limit, cachep->batchcount, cachep->shared);
Christoph Lametere498be72005-09-09 13:03:32 -07004194 seq_printf(m, " : slabdata %6lu %6lu %6lu",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004195 active_slabs, num_slabs, shared_avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196#if STATS
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004197 { /* list3 stats */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 unsigned long high = cachep->high_mark;
4199 unsigned long allocs = cachep->num_allocations;
4200 unsigned long grown = cachep->grown;
4201 unsigned long reaped = cachep->reaped;
4202 unsigned long errors = cachep->errors;
4203 unsigned long max_freeable = cachep->max_freeable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 unsigned long node_allocs = cachep->node_allocs;
Christoph Lametere498be72005-09-09 13:03:32 -07004205 unsigned long node_frees = cachep->node_frees;
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004206 unsigned long overflows = cachep->node_overflow;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207
Christoph Lametere498be72005-09-09 13:03:32 -07004208 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004209 %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
Andrew Mortona737b3e2006-03-22 00:08:11 -08004210 reaped, errors, max_freeable, node_allocs,
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004211 node_frees, overflows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 }
4213 /* cpu stats */
4214 {
4215 unsigned long allochit = atomic_read(&cachep->allochit);
4216 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4217 unsigned long freehit = atomic_read(&cachep->freehit);
4218 unsigned long freemiss = atomic_read(&cachep->freemiss);
4219
4220 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004221 allochit, allocmiss, freehit, freemiss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 }
4223#endif
4224 seq_putc(m, '\n');
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 return 0;
4226}
4227
4228/*
4229 * slabinfo_op - iterator that generates /proc/slabinfo
4230 *
4231 * Output layout:
4232 * cache-name
4233 * num-active-objs
4234 * total-objs
4235 * object size
4236 * num-active-slabs
4237 * total-slabs
4238 * num-pages-per-slab
4239 * + further values on SMP and with statistics enabled
4240 */
4241
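/*
 * Illustrative example of one /proc/slabinfo line (numbers are made up;
 * see s_show() above for the exact format string):
 *
 * dentry  132000 133540    192   20    1 : tunables  120   60    8 : slabdata   6677   6677      0
 */
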
Helge Deller15ad7cd2006-12-06 20:40:36 -08004242const struct seq_operations slabinfo_op = {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004243 .start = s_start,
4244 .next = s_next,
4245 .stop = s_stop,
4246 .show = s_show,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247};
4248
4249#define MAX_SLABINFO_WRITE 128
4250/**
4251 * slabinfo_write - Tuning for the slab allocator
4252 * @file: unused
4253 * @buffer: user buffer
4254 * @count: data length
4255 * @ppos: unused
4256 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004257ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4258 size_t count, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004260 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261 int limit, batchcount, shared, res;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004262 struct kmem_cache *cachep;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004263
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264 if (count > MAX_SLABINFO_WRITE)
4265 return -EINVAL;
4266 if (copy_from_user(&kbuf, buffer, count))
4267 return -EFAULT;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004268 kbuf[MAX_SLABINFO_WRITE] = '\0';
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269
4270 tmp = strchr(kbuf, ' ');
4271 if (!tmp)
4272 return -EINVAL;
4273 *tmp = '\0';
4274 tmp++;
4275 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4276 return -EINVAL;
4277
4278 /* Find the cache in the chain of caches. */
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004279 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280 res = -EINVAL;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004281 list_for_each_entry(cachep, &cache_chain, next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 if (!strcmp(cachep->name, kbuf)) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08004283 if (limit < 1 || batchcount < 1 ||
4284 batchcount > limit || shared < 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07004285 res = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07004287 res = do_tune_cpucache(cachep, limit,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004288 batchcount, shared);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289 }
4290 break;
4291 }
4292 }
Ingo Molnarfc0abb12006-01-18 17:42:33 -08004293 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294 if (res >= 0)
4295 res = count;
4296 return res;
4297}
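
/*
 * Illustrative sketch (userspace, not part of the original file): tuning a
 * cache through /proc/slabinfo.  slabinfo_write() expects a line of the
 * form "<cache-name> <limit> <batchcount> <sharedfactor>"; the cache name
 * and values here are made up.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f)
		return 1;
	/* limit=128, batchcount=64, shared=8 */
	fprintf(f, "dentry 128 64 8\n");
	return fclose(f) ? 1 : 0;
}
#endif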
Al Viro871751e2006-03-25 03:06:39 -08004298
4299#ifdef CONFIG_DEBUG_SLAB_LEAK
4300
4301static void *leaks_start(struct seq_file *m, loff_t *pos)
4302{
Al Viro871751e2006-03-25 03:06:39 -08004303 mutex_lock(&cache_chain_mutex);
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004304 return seq_list_start(&cache_chain, *pos);
Al Viro871751e2006-03-25 03:06:39 -08004305}
4306
4307static inline int add_caller(unsigned long *n, unsigned long v)
4308{
4309 unsigned long *p;
4310 int l;
4311 if (!v)
4312 return 1;
4313 l = n[1];
4314 p = n + 2;
4315 while (l) {
4316 int i = l/2;
4317 unsigned long *q = p + 2 * i;
4318 if (*q == v) {
4319 q[1]++;
4320 return 1;
4321 }
4322 if (*q > v) {
4323 l = i;
4324 } else {
4325 p = q + 2;
4326 l -= i + 1;
4327 }
4328 }
4329 if (++n[1] == n[0])
4330 return 0;
4331 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4332 p[0] = v;
4333 p[1] = 1;
4334 return 1;
4335}
4336
4337static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4338{
4339 void *p;
4340 int i;
4341 if (n[0] == n[1])
4342 return;
4343 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4344 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4345 continue;
4346 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4347 return;
4348 }
4349}
4350
4351static void show_symbol(struct seq_file *m, unsigned long address)
4352{
4353#ifdef CONFIG_KALLSYMS
Al Viro871751e2006-03-25 03:06:39 -08004354 unsigned long offset, size;
Tejun Heo9281ace2007-07-17 04:03:51 -07004355 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
Al Viro871751e2006-03-25 03:06:39 -08004356
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004357 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
Al Viro871751e2006-03-25 03:06:39 -08004358 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004359 if (modname[0])
Al Viro871751e2006-03-25 03:06:39 -08004360 seq_printf(m, " [%s]", modname);
4361 return;
4362 }
4363#endif
4364 seq_printf(m, "%p", (void *)address);
4365}
4366
4367static int leaks_show(struct seq_file *m, void *p)
4368{
Pavel Emelianovb92151b2007-07-15 23:38:04 -07004369 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
Al Viro871751e2006-03-25 03:06:39 -08004370 struct slab *slabp;
4371 struct kmem_list3 *l3;
4372 const char *name;
4373 unsigned long *n = m->private;
4374 int node;
4375 int i;
4376
4377 if (!(cachep->flags & SLAB_STORE_USER))
4378 return 0;
4379 if (!(cachep->flags & SLAB_RED_ZONE))
4380 return 0;
4381
4382 /* OK, we can do it */
4383
4384 n[1] = 0;
4385
4386 for_each_online_node(node) {
4387 l3 = cachep->nodelists[node];
4388 if (!l3)
4389 continue;
4390
4391 check_irq_on();
4392 spin_lock_irq(&l3->list_lock);
4393
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004394 list_for_each_entry(slabp, &l3->slabs_full, list)
Al Viro871751e2006-03-25 03:06:39 -08004395 handle_slab(n, cachep, slabp);
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004396 list_for_each_entry(slabp, &l3->slabs_partial, list)
Al Viro871751e2006-03-25 03:06:39 -08004397 handle_slab(n, cachep, slabp);
Al Viro871751e2006-03-25 03:06:39 -08004398 spin_unlock_irq(&l3->list_lock);
4399 }
4400 name = cachep->name;
4401 if (n[0] == n[1]) {
4402 /* Increase the buffer size */
4403 mutex_unlock(&cache_chain_mutex);
4404 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4405 if (!m->private) {
4406 /* Too bad, we are really out */
4407 m->private = n;
4408 mutex_lock(&cache_chain_mutex);
4409 return -ENOMEM;
4410 }
4411 *(unsigned long *)m->private = n[0] * 2;
4412 kfree(n);
4413 mutex_lock(&cache_chain_mutex);
4414 /* Now make sure this entry will be retried */
4415 m->count = m->size;
4416 return 0;
4417 }
4418 for (i = 0; i < n[1]; i++) {
4419 seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4420 show_symbol(m, n[2*i+2]);
4421 seq_putc(m, '\n');
4422 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07004423
Al Viro871751e2006-03-25 03:06:39 -08004424 return 0;
4425}
4426
Helge Deller15ad7cd2006-12-06 20:40:36 -08004427const struct seq_operations slabstats_op = {
Al Viro871751e2006-03-25 03:06:39 -08004428 .start = leaks_start,
4429 .next = s_next,
4430 .stop = s_stop,
4431 .show = leaks_show,
4432};
4433#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434#endif
4435
Manfred Spraul00e145b2005-09-03 15:55:07 -07004436/**
4437 * ksize - get the actual amount of memory allocated for a given object
4438 * @objp: Pointer to the object
4439 *
4440 * kmalloc may internally round up allocations and return more memory
4441 * than requested. ksize() can be used to determine the actual amount of
4442 * memory allocated. The caller may use this additional memory, even though
4443 * a smaller amount of memory was initially specified with the kmalloc call.
4444 * The caller must guarantee that objp points to a valid object previously
4445 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4446 * must not be freed during the duration of the call.
4447 */
Pekka Enbergfd76bab2007-05-06 14:48:40 -07004448size_t ksize(const void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449{
Christoph Lameteref8b4522007-10-16 01:24:46 -07004450 BUG_ON(!objp);
4451 if (unlikely(objp == ZERO_SIZE_PTR))
Manfred Spraul00e145b2005-09-03 15:55:07 -07004452 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08004454 return obj_size(virt_to_cache(objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455}
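
/*
 * Illustrative sketch (not part of the original file): using ksize() to
 * take advantage of kmalloc() round-up.  The helper name is made up.
 */
#if 0
#include <linux/slab.h>
#include <linux/string.h>

static void *foo_alloc_zeroed_slack(size_t requested)
{
	char *p = kmalloc(requested, GFP_KERNEL);

	if (!p)
		return NULL;
	/* everything up to ksize(p) is usable, not just @requested bytes */
	memset(p, 0, ksize(p));
	return p;
}
#endif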