/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array; most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change; they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 * Modified the slab allocator to be node aware on NUMA systems.
 * Each node has its own list of partial, free and full slabs.
 * All object allocations for a node occur from node specific slab lists.
 */

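/*
 * Typical usage, as a minimal sketch (not part of this file; the "foo"
 * names below are hypothetical).  A subsystem creates its cache once and
 * then allocates and frees objects through it:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	objp = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, objp);
 *	kmem_cache_destroy(foo_cachep);
 *
 * As noted above, a constructor passed to kmem_cache_create() runs only
 * when a new slab is populated, so freed objects must be returned in
 * their initialized state.
 */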
#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/rtmutex.h>

#include	<asm/uaccess.h>
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab as linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32-bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE>>3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

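/*
 * Illustrative sketch (not part of the original source; the exact indices
 * depend on kmalloc_sizes.h and the configured L1 cache size): with the
 * usual size table that begins 32, 64, 96, 128, ... on a 4 kB page
 * configuration, index_of(100) folds at compile time to the constant 3,
 * the slot of the 128-byte general cache.  The INDEX_AC and INDEX_L3
 * macros below rely on exactly this constant folding.
 */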
static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
/* 3) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
 * order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and/or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

static inline unsigned int obj_to_index(struct kmem_cache *cache,
					struct slab *slab, void *obj)
{
	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
}

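/*
 * Example (a sketch, not in the original file): with cache->buffer_size
 * == 256, the object that starts at slab->s_mem + 512 has index 2, and
 * index_to_obj(cache, slab, 2) maps back to the same address.  The bufctl
 * free list stores these small indices rather than full pointers.
 */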
/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(struct kmem_cache),
#endif
};

/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * vm_enough_memory() looks at this to determine how many slab-allocated pages
 * are possibly freeable under pressure
 *
 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
 */
atomic_t slab_reclaim_pages;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use the slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct work_struct, reap_work);

static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
static int __node_shrink(struct kmem_cache *cachep, int node);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

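/*
 * Worked example (a sketch, not from the original source; the struct
 * sizes are assumed): with a 4 kB page, sizeof(struct slab) == 28,
 * sizeof(kmem_bufctl_t) == 4, buffer_size == 256, align == 32 and
 * on-slab management, the initial guess is (4096 - 28) / (256 + 4) = 15
 * objects; slab_mgmt_size(15, 32) is ALIGN(28 + 15*4, 32) = 96, and
 * 96 + 15*256 = 3936 <= 4096, so the guess stands: *num == 15 and
 * *left_over == 160, which the colouring code later spreads across slabs.
 */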
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	__get_cpu_var(reap_node) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	/*
	 * Also drain per cpu pages on remote zones
	 */
	if (node != numa_node_id())
		drain_node_pages(node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct work_struct *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->func == NULL) {
		init_reap_node(cpu);
		INIT_WORK(reap_work, cache_reap, NULL);
		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

#ifdef CONFIG_NUMA
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;

	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(slabp->nodeid == numa_node_id()))
		return 0;

	l3 = cachep->nodelists[numa_node_id()];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}

#else

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)0x01020304ul;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

#endif

static int __devinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	int memsize = sizeof(struct kmem_list3);

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&cache_chain_mutex);
		/*
		 * We need to do this right in the beginning since
		 * alloc_arraycache's are going to use this list.
		 * kmalloc_node allows us to add the slab to the right
		 * kmem_list3 and not this cpu's kmem_list3
		 */

		list_for_each_entry(cachep, &cache_chain, next) {
			/*
			 * Set up the size64 kmemlist for cpu before we can
			 * begin anything. Make sure some other cpu on this
			 * node has not already allocated this
			 */
			if (!cachep->nodelists[node]) {
				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
				if (!l3)
					goto bad;
				kmem_list3_init(l3);
				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

				/*
				 * The l3s don't come and go as CPUs come and
				 * go.  cache_chain_mutex is sufficient
				 * protection here.
				 */
				cachep->nodelists[node] = l3;
			}

			spin_lock_irq(&cachep->nodelists[node]->list_lock);
			cachep->nodelists[node]->free_limit =
				(1 + nr_cpus_node(node)) *
				cachep->batchcount + cachep->num;
			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
		}

		/*
		 * Now we can go ahead with allocating the shared arrays and
		 * array caches
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared;
			struct array_cache **alien;

			nc = alloc_arraycache(node, cachep->limit,
						cachep->batchcount);
			if (!nc)
				goto bad;
			shared = alloc_arraycache(node,
					cachep->shared * cachep->batchcount,
					0xbaadf00d);
			if (!shared)
				goto bad;

			alien = alloc_alien_cache(node, cachep->limit);
			if (!alien)
				goto bad;
			cachep->array[cpu] = nc;
			l3 = cachep->nodelists[node];
			BUG_ON(!l3);

			spin_lock_irq(&l3->list_lock);
			if (!l3->shared) {
				/*
				 * We are serialised from CPU_DEAD or
				 * CPU_UP_CANCELLED by the cpucontrol lock
				 */
				l3->shared = shared;
				shared = NULL;
			}
#ifdef CONFIG_NUMA
			if (!l3->alien) {
				l3->alien = alien;
				alien = NULL;
			}
#endif
			spin_unlock_irq(&l3->list_lock);
			kfree(shared);
			free_alien_cache(alien);
		}
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_ONLINE:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall thru */
	case CPU_UP_CANCELED:
		mutex_lock(&cache_chain_mutex);
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared;
			struct array_cache **alien;
			cpumask_t mask;

			mask = node_to_cpumask(node);
			/* cpu is dead; no one can alloc from it. */
			nc = cachep->array[cpu];
			cachep->array[cpu] = NULL;
			l3 = cachep->nodelists[node];

			if (!l3)
				goto free_array_cache;

			spin_lock_irq(&l3->list_lock);

			/* Free limit for this kmem_list3 */
			l3->free_limit -= cachep->batchcount;
			if (nc)
				free_block(cachep, nc->entry, nc->avail, node);

			if (!cpus_empty(mask)) {
				spin_unlock_irq(&l3->list_lock);
				goto free_array_cache;
			}

			shared = l3->shared;
			if (shared) {
				free_block(cachep, l3->shared->entry,
					   l3->shared->avail, node);
				l3->shared = NULL;
			}

			alien = l3->alien;
			l3->alien = NULL;

			spin_unlock_irq(&l3->list_lock);

			kfree(shared);
			if (alien) {
				drain_alien_cache(cachep, alien);
				free_alien_cache(alien);
			}
free_array_cache:
			kfree(nc);
		}
		/*
		 * In the previous loop, all the objects were freed to
		 * the respective cache's slabs, now we can go ahead and
		 * shrink each nodelist to its limit.
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			l3 = cachep->nodelists[node];
			if (!l3)
				continue;
			spin_lock_irq(&l3->list_lock);
			/* free slabs belonging to this node */
			__node_shrink(cachep, node);
			spin_unlock_irq(&l3->list_lock);
		}
		mutex_unlock(&cache_chain_mutex);
		break;
#endif
	}
	return NOTIFY_OK;
bad:
	mutex_unlock(&cache_chain_mutex);
	return NOTIFY_BAD;
}

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

/*
 * swap the static kmem_list3 with kmalloced memory
 */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
			int nodeid)
{
	struct kmem_list3 *ptr;

	BUG_ON(cachep->nodelists[nodeid] != list);
	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
	BUG_ON(!ptr);

	local_irq_disable();
	memcpy(ptr, list, sizeof(struct kmem_list3));
	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->nodelists[nodeid] = ptr;
	local_irq_enable();
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the cache_cache */
	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];

	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());

	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.buffer_size,
			cache_line_size(), 0, &left_over, &cache_cache.num);
		if (cache_cache.num)
			break;
	}
	BUG_ON(!cache_cache.num);
	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1347 sizeof(struct slab), cache_line_size());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
1349 /* 2+3) create the kmalloc caches */
1350 sizes = malloc_sizes;
1351 names = cache_names;
1352
Andrew Mortona737b3e2006-03-22 00:08:11 -08001353 /*
1354 * Initialize the caches that provide memory for the array cache and the
1355 * kmem_list3 structures first. Without this, further allocations will
1356 * bug.
Christoph Lametere498be72005-09-09 13:03:32 -07001357 */
1358
1359 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001360 sizes[INDEX_AC].cs_size,
1361 ARCH_KMALLOC_MINALIGN,
1362 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1363 NULL, NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07001364
Andrew Mortona737b3e2006-03-22 00:08:11 -08001365 if (INDEX_AC != INDEX_L3) {
Christoph Lametere498be72005-09-09 13:03:32 -07001366 sizes[INDEX_L3].cs_cachep =
Andrew Mortona737b3e2006-03-22 00:08:11 -08001367 kmem_cache_create(names[INDEX_L3].name,
1368 sizes[INDEX_L3].cs_size,
1369 ARCH_KMALLOC_MINALIGN,
1370 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1371 NULL, NULL);
1372 }
Christoph Lametere498be72005-09-09 13:03:32 -07001373
Ingo Molnare0a42722006-06-23 02:03:46 -07001374 slab_early_init = 0;
1375
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 while (sizes->cs_size != ULONG_MAX) {
Christoph Lametere498be72005-09-09 13:03:32 -07001377 /*
1378 * For performance, all the general caches are L1 aligned.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 * This should be particularly beneficial on SMP boxes, as it
1380 * eliminates "false sharing".
1381 * Note for systems short on memory removing the alignment will
Christoph Lametere498be72005-09-09 13:03:32 -07001382 * allow tighter packing of the smaller caches.
1383 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001384 if (!sizes->cs_cachep) {
Christoph Lametere498be72005-09-09 13:03:32 -07001385 sizes->cs_cachep = kmem_cache_create(names->name,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001386 sizes->cs_size,
1387 ARCH_KMALLOC_MINALIGN,
1388 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1389 NULL, NULL);
1390 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001393 sizes->cs_size,
1394 ARCH_KMALLOC_MINALIGN,
1395 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1396 SLAB_PANIC,
1397 NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 sizes++;
1399 names++;
1400 }
1401 /* 4) Replace the bootstrap head arrays */
1402 {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001403 void *ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001404
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
Christoph Lametere498be72005-09-09 13:03:32 -07001406
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 local_irq_disable();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001408 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1409 memcpy(ptr, cpu_cache_get(&cache_cache),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001410 sizeof(struct arraycache_init));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 cache_cache.array[smp_processor_id()] = ptr;
1412 local_irq_enable();
Christoph Lametere498be72005-09-09 13:03:32 -07001413
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
Christoph Lametere498be72005-09-09 13:03:32 -07001415
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 local_irq_disable();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001417 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001418 != &initarray_generic.cache);
Pekka Enberg9a2dba42006-02-01 03:05:49 -08001419 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001420 sizeof(struct arraycache_init));
Christoph Lametere498be72005-09-09 13:03:32 -07001421 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001422 ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 local_irq_enable();
1424 }
Christoph Lametere498be72005-09-09 13:03:32 -07001425 /* 5) Replace the bootstrap kmem_list3's */
1426 {
1427 int node;
1428 /* Replace the static kmem_list3 structures for the boot cpu */
1429 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001430 numa_node_id());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431
Christoph Lametere498be72005-09-09 13:03:32 -07001432 for_each_online_node(node) {
1433 init_list(malloc_sizes[INDEX_AC].cs_cachep,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001434 &initkmem_list3[SIZE_AC + node], node);
Christoph Lametere498be72005-09-09 13:03:32 -07001435
1436 if (INDEX_AC != INDEX_L3) {
1437 init_list(malloc_sizes[INDEX_L3].cs_cachep,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001438 &initkmem_list3[SIZE_L3 + node],
1439 node);
Christoph Lametere498be72005-09-09 13:03:32 -07001440 }
1441 }
1442 }
1443
1444 /* 6) resize the head arrays to their final sizes */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 {
Pekka Enberg343e0d72006-02-01 03:05:50 -08001446 struct kmem_cache *cachep;
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001447 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 list_for_each_entry(cachep, &cache_chain, next)
Andrew Mortona737b3e2006-03-22 00:08:11 -08001449 enable_cpucache(cachep);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001450 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 }
1452
1453 /* Done! */
1454 g_cpucache_up = FULL;
1455
Andrew Mortona737b3e2006-03-22 00:08:11 -08001456 /*
1457 * Register a cpu startup notifier callback that initializes
1458 * cpu_cache_get for all new cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 */
1460 register_cpu_notifier(&cpucache_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461
Andrew Mortona737b3e2006-03-22 00:08:11 -08001462 /*
 1463 * The reap timers are started later, with a module init call: that part
1464 * of the kernel is not yet operational.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 */
1466}
1467
1468static int __init cpucache_init(void)
1469{
1470 int cpu;
1471
Andrew Mortona737b3e2006-03-22 00:08:11 -08001472 /*
1473 * Register the timers that return unneeded pages to the page allocator
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 */
Christoph Lametere498be72005-09-09 13:03:32 -07001475 for_each_online_cpu(cpu)
Andrew Mortona737b3e2006-03-22 00:08:11 -08001476 start_cpu_timer(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 return 0;
1478}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479__initcall(cpucache_init);
1480
1481/*
1482 * Interface to system's page allocator. No need to hold the cache-lock.
1483 *
1484 * If we requested dmaable memory, we will get it. Even if we
1485 * did not request dmaable memory, we might get it, but that
1486 * would be relatively rare and ignorable.
1487 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001488static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489{
1490 struct page *page;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001491 int nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 int i;
1493
Luke Yangd6fef9d2006-04-10 22:52:56 -07001494#ifndef CONFIG_MMU
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001495 /*
 1496 * Nommu uses slabs for process anonymous memory allocations, and thus
 1497 * requires __GFP_COMP to properly refcount higher order allocations.
Luke Yangd6fef9d2006-04-10 22:52:56 -07001498 */
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001499 flags |= __GFP_COMP;
Luke Yangd6fef9d2006-04-10 22:52:56 -07001500#endif
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001501 flags |= cachep->gfpflags;
1502
1503 page = alloc_pages_node(nodeid, flags, cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 if (!page)
1505 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001507 nr_pages = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001509 atomic_add(nr_pages, &slab_reclaim_pages);
1510 add_page_state(nr_slab, nr_pages);
1511 for (i = 0; i < nr_pages; i++)
1512 __SetPageSlab(page + i);
1513 return page_address(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514}
1515
1516/*
1517 * Interface to system's page release.
1518 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001519static void kmem_freepages(struct kmem_cache *cachep, void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001521 unsigned long i = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 struct page *page = virt_to_page(addr);
1523 const unsigned long nr_freed = i;
1524
1525 while (i--) {
Nick Pigginf205b2f2006-03-22 00:08:02 -08001526 BUG_ON(!PageSlab(page));
1527 __ClearPageSlab(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 page++;
1529 }
1530 sub_page_state(nr_slab, nr_freed);
1531 if (current->reclaim_state)
1532 current->reclaim_state->reclaimed_slab += nr_freed;
1533 free_pages((unsigned long)addr, cachep->gfporder);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001534 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1535 atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
1537
1538static void kmem_rcu_free(struct rcu_head *head)
1539{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001540 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
Pekka Enberg343e0d72006-02-01 03:05:50 -08001541 struct kmem_cache *cachep = slab_rcu->cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
1543 kmem_freepages(cachep, slab_rcu->addr);
1544 if (OFF_SLAB(cachep))
1545 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1546}
1547
1548#if DEBUG
1549
1550#ifdef CONFIG_DEBUG_PAGEALLOC
Pekka Enberg343e0d72006-02-01 03:05:50 -08001551static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001552 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001554 int size = obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001556 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001558 if (size < 5 * sizeof(unsigned long))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 return;
1560
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001561 *addr++ = 0x12345678;
1562 *addr++ = caller;
1563 *addr++ = smp_processor_id();
1564 size -= 3 * sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 {
1566 unsigned long *sptr = &caller;
1567 unsigned long svalue;
1568
1569 while (!kstack_end(sptr)) {
1570 svalue = *sptr++;
1571 if (kernel_text_address(svalue)) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001572 *addr++ = svalue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 size -= sizeof(unsigned long);
1574 if (size <= sizeof(unsigned long))
1575 break;
1576 }
1577 }
1578
1579 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001580 *addr++ = 0x87654321;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581}
1582#endif
1583
Pekka Enberg343e0d72006-02-01 03:05:50 -08001584static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001586 int size = obj_size(cachep);
1587 addr = &((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
1589 memset(addr, val, size);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001590 *(unsigned char *)(addr + size - 1) = POISON_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591}
1592
1593static void dump_line(char *data, int offset, int limit)
1594{
1595 int i;
1596 printk(KERN_ERR "%03x:", offset);
Andrew Mortona737b3e2006-03-22 00:08:11 -08001597 for (i = 0; i < limit; i++)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001598 printk(" %02x", (unsigned char)data[offset + i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 printk("\n");
1600}
1601#endif
1602
1603#if DEBUG
1604
Pekka Enberg343e0d72006-02-01 03:05:50 -08001605static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606{
1607 int i, size;
1608 char *realobj;
1609
1610 if (cachep->flags & SLAB_RED_ZONE) {
1611 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001612 *dbg_redzone1(cachep, objp),
1613 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 }
1615
1616 if (cachep->flags & SLAB_STORE_USER) {
1617 printk(KERN_ERR "Last user: [<%p>]",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001618 *dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 print_symbol("(%s)",
Andrew Mortona737b3e2006-03-22 00:08:11 -08001620 (unsigned long)*dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 printk("\n");
1622 }
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001623 realobj = (char *)objp + obj_offset(cachep);
1624 size = obj_size(cachep);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001625 for (i = 0; i < size && lines; i += 16, lines--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 int limit;
1627 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001628 if (i + limit > size)
1629 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 dump_line(realobj, i, limit);
1631 }
1632}
1633
Pekka Enberg343e0d72006-02-01 03:05:50 -08001634static void check_poison_obj(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635{
1636 char *realobj;
1637 int size, i;
1638 int lines = 0;
1639
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001640 realobj = (char *)objp + obj_offset(cachep);
1641 size = obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001643 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 char exp = POISON_FREE;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001645 if (i == size - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 exp = POISON_END;
1647 if (realobj[i] != exp) {
1648 int limit;
1649 /* Mismatch ! */
1650 /* Print header */
1651 if (lines == 0) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001652 printk(KERN_ERR
Andrew Mortona737b3e2006-03-22 00:08:11 -08001653 "Slab corruption: start=%p, len=%d\n",
1654 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 print_objinfo(cachep, objp, 0);
1656 }
1657 /* Hexdump the affected line */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001658 i = (i / 16) * 16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001660 if (i + limit > size)
1661 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 dump_line(realobj, i, limit);
1663 i += 16;
1664 lines++;
1665 /* Limit to 5 lines */
1666 if (lines > 5)
1667 break;
1668 }
1669 }
1670 if (lines != 0) {
1671 /* Print some data about the neighboring objects, if they
1672 * exist:
1673 */
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08001674 struct slab *slabp = virt_to_slab(objp);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001675 unsigned int objnr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001677 objnr = obj_to_index(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 if (objnr) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001679 objp = index_to_obj(cachep, slabp, objnr - 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001680 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001682 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 print_objinfo(cachep, objp, 2);
1684 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001685 if (objnr + 1 < cachep->num) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001686 objp = index_to_obj(cachep, slabp, objnr + 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001687 realobj = (char *)objp + obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001689 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 print_objinfo(cachep, objp, 2);
1691 }
1692 }
1693}
1694#endif
1695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696#if DEBUG
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001697/**
Randy Dunlap911851e2006-03-22 00:08:14 -08001698 * slab_destroy_objs - destroy a slab and its objects
1699 * @cachep: cache pointer being destroyed
1700 * @slabp: slab pointer being destroyed
1701 *
1702 * Call the registered destructor for each object in a slab that is being
1703 * destroyed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001704 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001705static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001706{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 int i;
1708 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001709 void *objp = index_to_obj(cachep, slabp, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
1711 if (cachep->flags & SLAB_POISON) {
1712#ifdef CONFIG_DEBUG_PAGEALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -08001713 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1714 OFF_SLAB(cachep))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001715 kernel_map_pages(virt_to_page(objp),
Andrew Mortona737b3e2006-03-22 00:08:11 -08001716 cachep->buffer_size / PAGE_SIZE, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 else
1718 check_poison_obj(cachep, objp);
1719#else
1720 check_poison_obj(cachep, objp);
1721#endif
1722 }
1723 if (cachep->flags & SLAB_RED_ZONE) {
1724 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1725 slab_error(cachep, "start of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001726 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1728 slab_error(cachep, "end of a freed object "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001729 "was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 }
1731 if (cachep->dtor && !(cachep->flags & SLAB_POISON))
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001732 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 }
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001734}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735#else
Pekka Enberg343e0d72006-02-01 03:05:50 -08001736static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001737{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 if (cachep->dtor) {
1739 int i;
1740 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001741 void *objp = index_to_obj(cachep, slabp, i);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001742 (cachep->dtor) (objp, cachep, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 }
1744 }
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001745}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746#endif
1747
Randy Dunlap911851e2006-03-22 00:08:14 -08001748/**
1749 * slab_destroy - destroy and release all objects in a slab
1750 * @cachep: cache pointer being destroyed
1751 * @slabp: slab pointer being destroyed
1752 *
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001753 * Destroy all the objs in a slab, and release the mem back to the system.
Andrew Mortona737b3e2006-03-22 00:08:11 -08001754 * Before calling, the slab must have been unlinked from the cache. The
1755 * cache-lock is not held/needed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001756 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001757static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001758{
1759 void *addr = slabp->s_mem - slabp->colouroff;
1760
1761 slab_destroy_objs(cachep, slabp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1763 struct slab_rcu *slab_rcu;
1764
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001765 slab_rcu = (struct slab_rcu *)slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 slab_rcu->cachep = cachep;
1767 slab_rcu->addr = addr;
1768 call_rcu(&slab_rcu->head, kmem_rcu_free);
1769 } else {
1770 kmem_freepages(cachep, addr);
1771 if (OFF_SLAB(cachep))
1772 kmem_cache_free(cachep->slabp_cache, slabp);
1773 }
1774}
1775
Andrew Mortona737b3e2006-03-22 00:08:11 -08001776/*
 1777 * For setting up all the kmem_list3s for a cache whose buffer_size is the
 1778 * same as the size of kmem_list3.
1779 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001780static void set_up_list3s(struct kmem_cache *cachep, int index)
Christoph Lametere498be72005-09-09 13:03:32 -07001781{
1782 int node;
1783
1784 for_each_online_node(node) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001785 cachep->nodelists[node] = &initkmem_list3[index + node];
Christoph Lametere498be72005-09-09 13:03:32 -07001786 cachep->nodelists[node]->next_reap = jiffies +
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001787 REAPTIMEOUT_LIST3 +
1788 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
Christoph Lametere498be72005-09-09 13:03:32 -07001789 }
1790}
1791
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792/**
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001793 * calculate_slab_order - calculate size (page order) of slabs
1794 * @cachep: pointer to the cache that is being created
1795 * @size: size of objects to be created in this cache.
1796 * @align: required alignment for the objects.
1797 * @flags: slab allocation flags
1798 *
1799 * Also calculates the number of objects per slab.
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001800 *
1801 * This could be made much more intelligent. For now, try to avoid using
1802 * high order pages for slabs. When the gfp() functions are more friendly
1803 * towards high-order requests, this should be changed.
1804 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001805static size_t calculate_slab_order(struct kmem_cache *cachep,
Randy Dunlapee13d782006-02-01 03:05:53 -08001806 size_t size, size_t align, unsigned long flags)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001807{
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001808 unsigned long offslab_limit;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001809 size_t left_over = 0;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001810 int gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001811
Andrew Mortona737b3e2006-03-22 00:08:11 -08001812 for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001813 unsigned int num;
1814 size_t remainder;
1815
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001816 cache_estimate(gfporder, size, align, flags, &remainder, &num);
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001817 if (!num)
1818 continue;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001819
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001820 if (flags & CFLGS_OFF_SLAB) {
1821 /*
1822 * Max number of objs-per-slab for caches which
1823 * use off-slab slabs. Needed to avoid a possible
1824 * looping condition in cache_grow().
1825 */
1826 offslab_limit = size - sizeof(struct slab);
1827 offslab_limit /= sizeof(kmem_bufctl_t);
1828
1829 if (num > offslab_limit)
1830 break;
1831 }
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001832
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001833 /* Found something acceptable - save it away */
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001834 cachep->num = num;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001835 cachep->gfporder = gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001836 left_over = remainder;
1837
1838 /*
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08001839 * A VFS-reclaimable slab tends to have most allocations
1840 * as GFP_NOFS and we really don't want to have to be allocating
1841 * higher-order pages when we are unable to shrink dcache.
1842 */
1843 if (flags & SLAB_RECLAIM_ACCOUNT)
1844 break;
1845
1846 /*
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001847 * Large number of objects is good, but very large slabs are
1848 * currently bad for the gfp()s.
1849 */
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001850 if (gfporder >= slab_break_gfp_order)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001851 break;
1852
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001853 /*
1854 * Acceptable internal fragmentation?
1855 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001856 if (left_over * 8 <= (PAGE_SIZE << gfporder))
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001857 break;
1858 }
1859 return left_over;
1860}
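/*
 * Worked example (illustrative only, not part of the original source,
 * assuming 4K pages): for a cache of 2048-byte objects with off-slab
 * management, order 0 already fits two objects with no remainder, so the
 * loop above accepts gfporder = 0 with left_over = 0 instead of probing
 * higher orders.
 */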
1861
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001862static void setup_cpu_cache(struct kmem_cache *cachep)
1863{
1864 if (g_cpucache_up == FULL) {
1865 enable_cpucache(cachep);
1866 return;
1867 }
1868 if (g_cpucache_up == NONE) {
1869 /*
1870 * Note: the first kmem_cache_create must create the cache
1871 * that's used by kmalloc(24), otherwise the creation of
1872 * further caches will BUG().
1873 */
1874 cachep->array[smp_processor_id()] = &initarray_generic.cache;
1875
1876 /*
1877 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
1878 * the first cache, then we need to set up all its list3s,
1879 * otherwise the creation of further caches will BUG().
1880 */
1881 set_up_list3s(cachep, SIZE_AC);
1882 if (INDEX_AC == INDEX_L3)
1883 g_cpucache_up = PARTIAL_L3;
1884 else
1885 g_cpucache_up = PARTIAL_AC;
1886 } else {
1887 cachep->array[smp_processor_id()] =
1888 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1889
1890 if (g_cpucache_up == PARTIAL_AC) {
1891 set_up_list3s(cachep, SIZE_L3);
1892 g_cpucache_up = PARTIAL_L3;
1893 } else {
1894 int node;
1895 for_each_online_node(node) {
1896 cachep->nodelists[node] =
1897 kmalloc_node(sizeof(struct kmem_list3),
1898 GFP_KERNEL, node);
1899 BUG_ON(!cachep->nodelists[node]);
1900 kmem_list3_init(cachep->nodelists[node]);
1901 }
1902 }
1903 }
1904 cachep->nodelists[numa_node_id()]->next_reap =
1905 jiffies + REAPTIMEOUT_LIST3 +
1906 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1907
1908 cpu_cache_get(cachep)->avail = 0;
1909 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1910 cpu_cache_get(cachep)->batchcount = 1;
1911 cpu_cache_get(cachep)->touched = 0;
1912 cachep->batchcount = 1;
1913 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1914}
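/*
 * Summary of the bootstrap states handled above (descriptive note, not new
 * behaviour): g_cpucache_up moves NONE -> PARTIAL_AC -> PARTIAL_L3 -> FULL.
 * While the arraycache and kmem_list3 caches do not exist yet, static
 * __initdata structures stand in for them; once g_cpucache_up reaches FULL,
 * enable_cpucache() sizes the per-cpu arrays normally.
 */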
1915
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001916/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 * kmem_cache_create - Create a cache.
1918 * @name: A string which is used in /proc/slabinfo to identify this cache.
1919 * @size: The size of objects to be created in this cache.
1920 * @align: The required alignment for the objects.
1921 * @flags: SLAB flags
1922 * @ctor: A constructor for the objects.
1923 * @dtor: A destructor for the objects.
1924 *
1925 * Returns a ptr to the cache on success, NULL on failure.
 1926 * Cannot be called within an interrupt, but can be interrupted.
1927 * The @ctor is run when new pages are allocated by the cache
1928 * and the @dtor is run before the pages are handed back.
1929 *
1930 * @name must be valid until the cache is destroyed. This implies that
Andrew Mortona737b3e2006-03-22 00:08:11 -08001931 * the module calling this has to destroy the cache before getting unloaded.
1932 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 * The flags are
1934 *
1935 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1936 * to catch references to uninitialised memory.
1937 *
1938 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1939 * for buffer overruns.
1940 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1942 * cacheline. This can be beneficial if you're counting cycles as closely
1943 * as davem.
1944 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08001945struct kmem_cache *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946kmem_cache_create (const char *name, size_t size, size_t align,
Andrew Mortona737b3e2006-03-22 00:08:11 -08001947 unsigned long flags,
1948 void (*ctor)(void*, struct kmem_cache *, unsigned long),
Pekka Enberg343e0d72006-02-01 03:05:50 -08001949 void (*dtor)(void*, struct kmem_cache *, unsigned long))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950{
1951 size_t left_over, slab_size, ralign;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07001952 struct kmem_cache *cachep = NULL, *pc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954 /*
1955 * Sanity checks... these are all serious usage bugs.
1956 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001957 if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001958 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08001959 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
1960 name);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001961 BUG();
1962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
Ravikiran G Thirumalaif0188f42006-02-10 01:51:13 -08001964 /*
1965 * Prevent CPUs from coming and going.
1966 * lock_cpu_hotplug() nests outside cache_chain_mutex
1967 */
1968 lock_cpu_hotplug();
1969
Ingo Molnarfc0abb12006-01-18 17:42:33 -08001970 mutex_lock(&cache_chain_mutex);
Andrew Morton4f12bb42005-11-07 00:58:00 -08001971
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07001972 list_for_each_entry(pc, &cache_chain, next) {
Andrew Morton4f12bb42005-11-07 00:58:00 -08001973 mm_segment_t old_fs = get_fs();
1974 char tmp;
1975 int res;
1976
1977 /*
1978 * This happens when the module gets unloaded and doesn't
1979 * destroy its slab cache and no-one else reuses the vmalloc
1980 * area of the module. Print a warning.
1981 */
1982 set_fs(KERNEL_DS);
1983 res = __get_user(tmp, pc->name);
1984 set_fs(old_fs);
1985 if (res) {
1986 printk("SLAB: cache with size %d has lost its name\n",
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001987 pc->buffer_size);
Andrew Morton4f12bb42005-11-07 00:58:00 -08001988 continue;
1989 }
1990
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001991 if (!strcmp(pc->name, name)) {
Andrew Morton4f12bb42005-11-07 00:58:00 -08001992 printk("kmem_cache_create: duplicate cache %s\n", name);
1993 dump_stack();
1994 goto oops;
1995 }
1996 }
1997
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998#if DEBUG
1999 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
2000 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
 2001 /* No constructor, but initial state check requested */
2002 printk(KERN_ERR "%s: No con, but init state check "
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002003 "requested - %s\n", __FUNCTION__, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 flags &= ~SLAB_DEBUG_INITIAL;
2005 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006#if FORCED_DEBUG
2007 /*
2008 * Enable redzoning and last user accounting, except for caches with
2009 * large objects, if the increased size would increase the object size
2010 * above the next power of two: caches with object sizes just above a
2011 * power of two have a significant amount of internal fragmentation.
2012 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002013 if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002014 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 if (!(flags & SLAB_DESTROY_BY_RCU))
2016 flags |= SLAB_POISON;
2017#endif
2018 if (flags & SLAB_DESTROY_BY_RCU)
2019 BUG_ON(flags & SLAB_POISON);
2020#endif
2021 if (flags & SLAB_DESTROY_BY_RCU)
2022 BUG_ON(dtor);
2023
2024 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002025 * Always check flags; a caller might be expecting debug support which
2026 * isn't available.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 */
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002028 BUG_ON(flags & ~CREATE_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Andrew Mortona737b3e2006-03-22 00:08:11 -08002030 /*
2031 * Check that size is in terms of words. This is needed to avoid
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 * unaligned accesses for some archs when redzoning is used, and makes
2033 * sure any on-slab bufctl's are also correctly aligned.
2034 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002035 if (size & (BYTES_PER_WORD - 1)) {
2036 size += (BYTES_PER_WORD - 1);
2037 size &= ~(BYTES_PER_WORD - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 }
2039
Andrew Mortona737b3e2006-03-22 00:08:11 -08002040 /* calculate the final buffer alignment: */
2041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 /* 1) arch recommendation: can be overridden for debug */
2043 if (flags & SLAB_HWCACHE_ALIGN) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002044 /*
2045 * Default alignment: as specified by the arch code. Except if
2046 * an object is really small, then squeeze multiple objects into
2047 * one cacheline.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 */
2049 ralign = cache_line_size();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002050 while (size <= ralign / 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 ralign /= 2;
2052 } else {
2053 ralign = BYTES_PER_WORD;
2054 }
2055 /* 2) arch mandated alignment: disables debug if necessary */
2056 if (ralign < ARCH_SLAB_MINALIGN) {
2057 ralign = ARCH_SLAB_MINALIGN;
2058 if (ralign > BYTES_PER_WORD)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002059 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 }
2061 /* 3) caller mandated alignment: disables debug if necessary */
2062 if (ralign < align) {
2063 ralign = align;
2064 if (ralign > BYTES_PER_WORD)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002065 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 }
Andrew Mortona737b3e2006-03-22 00:08:11 -08002067 /*
2068 * 4) Store it. Note that the debug code below can reduce
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 * the alignment to BYTES_PER_WORD.
2070 */
2071 align = ralign;
2072
2073 /* Get cache's description obj. */
Pekka Enbergc5e3b832006-03-25 03:06:43 -08002074 cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 if (!cachep)
Andrew Morton4f12bb42005-11-07 00:58:00 -08002076 goto oops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
2078#if DEBUG
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002079 cachep->obj_size = size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
2081 if (flags & SLAB_RED_ZONE) {
2082 /* redzoning only works with word aligned caches */
2083 align = BYTES_PER_WORD;
2084
2085 /* add space for red zone words */
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002086 cachep->obj_offset += BYTES_PER_WORD;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002087 size += 2 * BYTES_PER_WORD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 }
2089 if (flags & SLAB_STORE_USER) {
2090 /* user store requires word alignment and
2091 * one word storage behind the end of the real
2092 * object.
2093 */
2094 align = BYTES_PER_WORD;
2095 size += BYTES_PER_WORD;
2096 }
2097#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002098 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002099 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2100 cachep->obj_offset += PAGE_SIZE - size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 size = PAGE_SIZE;
2102 }
2103#endif
2104#endif
2105
Ingo Molnare0a42722006-06-23 02:03:46 -07002106 /*
2107 * Determine if the slab management is 'on' or 'off' slab.
2108 * (bootstrapping cannot cope with offslab caches so don't do
2109 * it too early on.)
2110 */
2111 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 /*
2113 * Size is large, assume best to place the slab management obj
2114 * off-slab (should allow better packing of objs).
2115 */
2116 flags |= CFLGS_OFF_SLAB;
2117
2118 size = ALIGN(size, align);
2119
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08002120 left_over = calculate_slab_order(cachep, size, align, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
2122 if (!cachep->num) {
2123 printk("kmem_cache_create: couldn't create cache %s.\n", name);
2124 kmem_cache_free(&cache_cache, cachep);
2125 cachep = NULL;
Andrew Morton4f12bb42005-11-07 00:58:00 -08002126 goto oops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002128 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2129 + sizeof(struct slab), align);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 /*
2132 * If the slab has been placed off-slab, and we have enough space then
2133 * move it on-slab. This is at the expense of any extra colouring.
2134 */
2135 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2136 flags &= ~CFLGS_OFF_SLAB;
2137 left_over -= slab_size;
2138 }
2139
2140 if (flags & CFLGS_OFF_SLAB) {
2141 /* really off slab. No need for manual alignment */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002142 slab_size =
2143 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 }
2145
2146 cachep->colour_off = cache_line_size();
2147 /* Offset must be a multiple of the alignment. */
2148 if (cachep->colour_off < align)
2149 cachep->colour_off = align;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002150 cachep->colour = left_over / cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 cachep->slab_size = slab_size;
2152 cachep->flags = flags;
2153 cachep->gfpflags = 0;
2154 if (flags & SLAB_CACHE_DMA)
2155 cachep->gfpflags |= GFP_DMA;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002156 cachep->buffer_size = size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
2158 if (flags & CFLGS_OFF_SLAB)
Victor Fuscob2d55072005-09-10 00:26:36 -07002159 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 cachep->ctor = ctor;
2161 cachep->dtor = dtor;
2162 cachep->name = name;
2163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08002165 setup_cpu_cache(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 /* cache setup completed, link it into the list */
2168 list_add(&cachep->next, &cache_chain);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002169oops:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 if (!cachep && (flags & SLAB_PANIC))
2171 panic("kmem_cache_create(): failed to create slab `%s'\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002172 name);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002173 mutex_unlock(&cache_chain_mutex);
Ravikiran G Thirumalaif0188f42006-02-10 01:51:13 -08002174 unlock_cpu_hotplug();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 return cachep;
2176}
2177EXPORT_SYMBOL(kmem_cache_create);
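/*
 * Usage sketch (illustrative only; foo_cachep and struct foo are hypothetical
 * names, not part of this file). A module typically creates its cache once at
 * init time:
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 * and pairs it with kmem_cache_destroy(foo_cachep) on unload, because @name
 * is referenced for the whole lifetime of the cache.
 */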
2178
2179#if DEBUG
2180static void check_irq_off(void)
2181{
2182 BUG_ON(!irqs_disabled());
2183}
2184
2185static void check_irq_on(void)
2186{
2187 BUG_ON(irqs_disabled());
2188}
2189
Pekka Enberg343e0d72006-02-01 03:05:50 -08002190static void check_spinlock_acquired(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191{
2192#ifdef CONFIG_SMP
2193 check_irq_off();
Christoph Lametere498be72005-09-09 13:03:32 -07002194 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195#endif
2196}
Christoph Lametere498be72005-09-09 13:03:32 -07002197
Pekka Enberg343e0d72006-02-01 03:05:50 -08002198static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
Christoph Lametere498be72005-09-09 13:03:32 -07002199{
2200#ifdef CONFIG_SMP
2201 check_irq_off();
2202 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2203#endif
2204}
2205
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206#else
2207#define check_irq_off() do { } while(0)
2208#define check_irq_on() do { } while(0)
2209#define check_spinlock_acquired(x) do { } while(0)
Christoph Lametere498be72005-09-09 13:03:32 -07002210#define check_spinlock_acquired_node(x, y) do { } while(0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211#endif
2212
Christoph Lameteraab22072006-03-22 00:09:06 -08002213static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2214 struct array_cache *ac,
2215 int force, int node);
2216
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217static void do_drain(void *arg)
2218{
Andrew Mortona737b3e2006-03-22 00:08:11 -08002219 struct kmem_cache *cachep = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 struct array_cache *ac;
Christoph Lameterff694162005-09-22 21:44:02 -07002221 int node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
2223 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002224 ac = cpu_cache_get(cachep);
Christoph Lameterff694162005-09-22 21:44:02 -07002225 spin_lock(&cachep->nodelists[node]->list_lock);
2226 free_block(cachep, ac->entry, ac->avail, node);
2227 spin_unlock(&cachep->nodelists[node]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 ac->avail = 0;
2229}
2230
Pekka Enberg343e0d72006-02-01 03:05:50 -08002231static void drain_cpu_caches(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232{
Christoph Lametere498be72005-09-09 13:03:32 -07002233 struct kmem_list3 *l3;
2234 int node;
2235
Andrew Mortona07fa392006-03-22 00:08:17 -08002236 on_each_cpu(do_drain, cachep, 1, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 check_irq_on();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002238 for_each_online_node(node) {
Christoph Lametere498be72005-09-09 13:03:32 -07002239 l3 = cachep->nodelists[node];
Roland Dreiera4523a82006-05-15 11:41:00 -07002240 if (l3 && l3->alien)
2241 drain_alien_cache(cachep, l3->alien);
2242 }
2243
2244 for_each_online_node(node) {
2245 l3 = cachep->nodelists[node];
2246 if (l3)
Christoph Lameteraab22072006-03-22 00:09:06 -08002247 drain_array(cachep, l3, l3->shared, 1, node);
Christoph Lametere498be72005-09-09 13:03:32 -07002248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249}
2250
Pekka Enberg343e0d72006-02-01 03:05:50 -08002251static int __node_shrink(struct kmem_cache *cachep, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252{
2253 struct slab *slabp;
Christoph Lametere498be72005-09-09 13:03:32 -07002254 struct kmem_list3 *l3 = cachep->nodelists[node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 int ret;
2256
Christoph Lametere498be72005-09-09 13:03:32 -07002257 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 struct list_head *p;
2259
Christoph Lametere498be72005-09-09 13:03:32 -07002260 p = l3->slabs_free.prev;
2261 if (p == &l3->slabs_free)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 break;
2263
Christoph Lametere498be72005-09-09 13:03:32 -07002264 slabp = list_entry(l3->slabs_free.prev, struct slab, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265#if DEBUG
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002266 BUG_ON(slabp->inuse);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267#endif
2268 list_del(&slabp->list);
2269
Christoph Lametere498be72005-09-09 13:03:32 -07002270 l3->free_objects -= cachep->num;
2271 spin_unlock_irq(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 slab_destroy(cachep, slabp);
Christoph Lametere498be72005-09-09 13:03:32 -07002273 spin_lock_irq(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002275 ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 return ret;
2277}
2278
Pekka Enberg343e0d72006-02-01 03:05:50 -08002279static int __cache_shrink(struct kmem_cache *cachep)
Christoph Lametere498be72005-09-09 13:03:32 -07002280{
2281 int ret = 0, i = 0;
2282 struct kmem_list3 *l3;
2283
2284 drain_cpu_caches(cachep);
2285
2286 check_irq_on();
2287 for_each_online_node(i) {
2288 l3 = cachep->nodelists[i];
2289 if (l3) {
2290 spin_lock_irq(&l3->list_lock);
2291 ret += __node_shrink(cachep, i);
2292 spin_unlock_irq(&l3->list_lock);
2293 }
2294 }
2295 return (ret ? 1 : 0);
2296}
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298/**
2299 * kmem_cache_shrink - Shrink a cache.
2300 * @cachep: The cache to shrink.
2301 *
2302 * Releases as many slabs as possible for a cache.
2303 * To help debugging, a zero exit status indicates all slabs were released.
2304 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002305int kmem_cache_shrink(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306{
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002307 BUG_ON(!cachep || in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308
2309 return __cache_shrink(cachep);
2310}
2311EXPORT_SYMBOL(kmem_cache_shrink);
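/*
 * Illustrative call (hypothetical cache name, not part of the original
 * source): a memory-pressure path can trim a cache with
 *
 *	int busy = kmem_cache_shrink(foo_cachep);
 *
 * where busy == 0 means every slab was handed back to the page allocator and
 * a non-zero value means some slabs still hold live objects.
 */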
2312
2313/**
2314 * kmem_cache_destroy - delete a cache
2315 * @cachep: the cache to destroy
2316 *
Pekka Enberg343e0d72006-02-01 03:05:50 -08002317 * Remove a struct kmem_cache object from the slab cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 * Returns 0 on success.
2319 *
2320 * It is expected this function will be called by a module when it is
2321 * unloaded. This will remove the cache completely, and avoid a duplicate
2322 * cache being allocated each time a module is loaded and unloaded, if the
2323 * module doesn't have persistent in-kernel storage across loads and unloads.
2324 *
2325 * The cache must be empty before calling this function.
2326 *
 2327 * The caller must guarantee that no one will allocate memory from the cache
2328 * during the kmem_cache_destroy().
2329 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002330int kmem_cache_destroy(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331{
2332 int i;
Christoph Lametere498be72005-09-09 13:03:32 -07002333 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002335 BUG_ON(!cachep || in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336
 2337 /* Don't let CPUs come and go */
2338 lock_cpu_hotplug();
2339
2340 /* Find the cache in the chain of caches. */
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002341 mutex_lock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 /*
2343 * the chain is never empty, cache_cache is never destroyed
2344 */
2345 list_del(&cachep->next);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002346 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
2348 if (__cache_shrink(cachep)) {
2349 slab_error(cachep, "Can't free all objects");
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002350 mutex_lock(&cache_chain_mutex);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002351 list_add(&cachep->next, &cache_chain);
Ingo Molnarfc0abb12006-01-18 17:42:33 -08002352 mutex_unlock(&cache_chain_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 unlock_cpu_hotplug();
2354 return 1;
2355 }
2356
2357 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07002358 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359
Christoph Lametere498be72005-09-09 13:03:32 -07002360 for_each_online_cpu(i)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002361 kfree(cachep->array[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
2363 /* NUMA: free the list3 structures */
Christoph Lametere498be72005-09-09 13:03:32 -07002364 for_each_online_node(i) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002365 l3 = cachep->nodelists[i];
2366 if (l3) {
Christoph Lametere498be72005-09-09 13:03:32 -07002367 kfree(l3->shared);
2368 free_alien_cache(l3->alien);
2369 kfree(l3);
2370 }
2371 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 kmem_cache_free(&cache_cache, cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 unlock_cpu_hotplug();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 return 0;
2375}
2376EXPORT_SYMBOL(kmem_cache_destroy);
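/*
 * Typical pairing on module unload (illustrative; foo_cachep is a hypothetical
 * name): after every outstanding object has been returned with
 * kmem_cache_free(), the module calls
 *
 *	if (kmem_cache_destroy(foo_cachep))
 *		printk(KERN_ERR "foo: cache still has objects\n");
 *
 * A non-zero return means the cache could not be emptied; as the code above
 * shows, it is then re-linked into the cache chain and left alive.
 */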
2377
2378/* Get the memory for a slab management obj. */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002379static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002380 int colour_off, gfp_t local_flags,
2381 int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382{
2383 struct slab *slabp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002384
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 if (OFF_SLAB(cachep)) {
2386 /* Slab management obj is off-slab. */
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002387 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2388 local_flags, nodeid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 if (!slabp)
2390 return NULL;
2391 } else {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002392 slabp = objp + colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 colour_off += cachep->slab_size;
2394 }
2395 slabp->inuse = 0;
2396 slabp->colouroff = colour_off;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002397 slabp->s_mem = objp + colour_off;
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002398 slabp->nodeid = nodeid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 return slabp;
2400}
2401
2402static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2403{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002404 return (kmem_bufctl_t *) (slabp + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405}
2406
Pekka Enberg343e0d72006-02-01 03:05:50 -08002407static void cache_init_objs(struct kmem_cache *cachep,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002408 struct slab *slabp, unsigned long ctor_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409{
2410 int i;
2411
2412 for (i = 0; i < cachep->num; i++) {
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002413 void *objp = index_to_obj(cachep, slabp, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414#if DEBUG
2415 /* need to poison the objs? */
2416 if (cachep->flags & SLAB_POISON)
2417 poison_obj(cachep, objp, POISON_FREE);
2418 if (cachep->flags & SLAB_STORE_USER)
2419 *dbg_userword(cachep, objp) = NULL;
2420
2421 if (cachep->flags & SLAB_RED_ZONE) {
2422 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2423 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2424 }
2425 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002426 * Constructors are not allowed to allocate memory from the same
2427 * cache which they are a constructor for. Otherwise, deadlock.
2428 * They must also be threaded.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 */
2430 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002431 cachep->ctor(objp + obj_offset(cachep), cachep,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002432 ctor_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
2434 if (cachep->flags & SLAB_RED_ZONE) {
2435 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2436 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002437 " end of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2439 slab_error(cachep, "constructor overwrote the"
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002440 " start of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 }
Andrew Mortona737b3e2006-03-22 00:08:11 -08002442 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2443 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002444 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002445 cachep->buffer_size / PAGE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446#else
2447 if (cachep->ctor)
2448 cachep->ctor(objp, cachep, ctor_flags);
2449#endif
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002450 slab_bufctl(slabp)[i] = i + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002452 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 slabp->free = 0;
2454}
2455
Pekka Enberg343e0d72006-02-01 03:05:50 -08002456static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457{
Andrew Mortona737b3e2006-03-22 00:08:11 -08002458 if (flags & SLAB_DMA)
2459 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2460 else
2461 BUG_ON(cachep->gfpflags & GFP_DMA);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462}
2463
Andrew Mortona737b3e2006-03-22 00:08:11 -08002464static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2465 int nodeid)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002466{
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002467 void *objp = index_to_obj(cachep, slabp, slabp->free);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002468 kmem_bufctl_t next;
2469
2470 slabp->inuse++;
2471 next = slab_bufctl(slabp)[slabp->free];
2472#if DEBUG
2473 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2474 WARN_ON(slabp->nodeid != nodeid);
2475#endif
2476 slabp->free = next;
2477
2478 return objp;
2479}
2480
Andrew Mortona737b3e2006-03-22 00:08:11 -08002481static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2482 void *objp, int nodeid)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002483{
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002484 unsigned int objnr = obj_to_index(cachep, slabp, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002485
2486#if DEBUG
2487 /* Verify that the slab belongs to the intended node */
2488 WARN_ON(slabp->nodeid != nodeid);
2489
Al Viro871751e2006-03-25 03:06:39 -08002490 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
Matthew Dobson78d382d2006-02-01 03:05:47 -08002491 printk(KERN_ERR "slab: double free detected in cache "
Andrew Mortona737b3e2006-03-22 00:08:11 -08002492 "'%s', objp %p\n", cachep->name, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002493 BUG();
2494 }
2495#endif
2496 slab_bufctl(slabp)[objnr] = slabp->free;
2497 slabp->free = objnr;
2498 slabp->inuse--;
2499}
2500
Pekka Enberg47768742006-06-23 02:03:07 -07002501/*
2502 * Map pages beginning at addr to the given cache and slab. This is required
 2503 * for the slab allocator to be able to look up the cache and slab of a
2504 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2505 */
2506static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2507 void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508{
Pekka Enberg47768742006-06-23 02:03:07 -07002509 int nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 struct page *page;
2511
Pekka Enberg47768742006-06-23 02:03:07 -07002512 page = virt_to_page(addr);
Nick Piggin84097512006-03-22 00:08:34 -08002513
Pekka Enberg47768742006-06-23 02:03:07 -07002514 nr_pages = 1;
Nick Piggin84097512006-03-22 00:08:34 -08002515 if (likely(!PageCompound(page)))
Pekka Enberg47768742006-06-23 02:03:07 -07002516 nr_pages <<= cache->gfporder;
2517
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 do {
Pekka Enberg47768742006-06-23 02:03:07 -07002519 page_set_cache(page, cache);
2520 page_set_slab(page, slab);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 page++;
Pekka Enberg47768742006-06-23 02:03:07 -07002522 } while (--nr_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523}
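
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * once slab_map_pages() has run, the owning cache and slab of any object can
 * be recovered from its struct page, which is what virt_to_cache(),
 * virt_to_slab() and hence kfree() rely on. The helper name is hypothetical.
 */
#if 0
static void example_reverse_lookup(void *obj)
{
	struct page *page = virt_to_page(obj);

	/* Both mappings were stored by slab_map_pages() above. */
	printk(KERN_DEBUG "obj %p -> cache %s, slab %p\n",
	       obj, page_get_cache(page)->name, page_get_slab(page));
}
#endif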
2524
2525/*
2526 * Grow (by 1) the number of slabs within a cache. This is called by
2527 * kmem_cache_alloc() when there are no active objs left in a cache.
2528 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08002529static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002531 struct slab *slabp;
2532 void *objp;
2533 size_t offset;
2534 gfp_t local_flags;
2535 unsigned long ctor_flags;
Christoph Lametere498be72005-09-09 13:03:32 -07002536 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
Andrew Mortona737b3e2006-03-22 00:08:11 -08002538 /*
2539 * Be lazy and only check for valid flags here, keeping it out of the
2540 * critical path in kmem_cache_alloc().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 */
Eric Sesterhenn40094fa2006-04-02 13:49:25 +02002542 BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 if (flags & SLAB_NO_GROW)
2544 return 0;
2545
2546 ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2547 local_flags = (flags & SLAB_LEVEL_MASK);
2548 if (!(local_flags & __GFP_WAIT))
2549 /*
2550 * Not allowed to sleep. Need to tell a constructor about
2551 * this - it might need to know...
2552 */
2553 ctor_flags |= SLAB_CTOR_ATOMIC;
2554
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002555 /* Take the l3 list lock to change the colour_next on this node */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 check_irq_off();
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002557 l3 = cachep->nodelists[nodeid];
2558 spin_lock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
 2560 /* Get colour for the slab, and calc the next value. */
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002561 offset = l3->colour_next;
2562 l3->colour_next++;
2563 if (l3->colour_next >= cachep->colour)
2564 l3->colour_next = 0;
2565 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002567 offset *= cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568
2569 if (local_flags & __GFP_WAIT)
2570 local_irq_enable();
2571
2572 /*
2573 * The test for missing atomic flag is performed here, rather than
2574 * the more obvious place, simply to reduce the critical path length
2575 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2576 * will eventually be caught here (where it matters).
2577 */
2578 kmem_flagcheck(cachep, flags);
2579
Andrew Mortona737b3e2006-03-22 00:08:11 -08002580 /*
2581 * Get mem for the objs. Attempt to allocate a physical page from
2582 * 'nodeid'.
Christoph Lametere498be72005-09-09 13:03:32 -07002583 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002584 objp = kmem_getpages(cachep, flags, nodeid);
2585 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 goto failed;
2587
2588 /* Get slab management. */
Ravikiran G Thirumalai5b74ada2006-04-10 22:52:53 -07002589 slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002590 if (!slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 goto opps1;
2592
Christoph Lametere498be72005-09-09 13:03:32 -07002593 slabp->nodeid = nodeid;
Pekka Enberg47768742006-06-23 02:03:07 -07002594 slab_map_pages(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595
2596 cache_init_objs(cachep, slabp, ctor_flags);
2597
2598 if (local_flags & __GFP_WAIT)
2599 local_irq_disable();
2600 check_irq_off();
Christoph Lametere498be72005-09-09 13:03:32 -07002601 spin_lock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602
2603 /* Make slab active. */
Christoph Lametere498be72005-09-09 13:03:32 -07002604 list_add_tail(&slabp->list, &(l3->slabs_free));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 STATS_INC_GROWN(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07002606 l3->free_objects += cachep->num;
2607 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002609opps1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 kmem_freepages(cachep, objp);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002611failed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 if (local_flags & __GFP_WAIT)
2613 local_irq_disable();
2614 return 0;
2615}
2616
2617#if DEBUG
2618
2619/*
2620 * Perform extra freeing checks:
2621 * - detect bad pointers.
2622 * - POISON/RED_ZONE checking
2623 * - destructor calls, for caches with POISON+dtor
2624 */
2625static void kfree_debugcheck(const void *objp)
2626{
2627 struct page *page;
2628
2629 if (!virt_addr_valid(objp)) {
2630 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002631 (unsigned long)objp);
2632 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 }
2634 page = virt_to_page(objp);
2635 if (!PageSlab(page)) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002636 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
2637 (unsigned long)objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 BUG();
2639 }
2640}
2641
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002642static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2643{
2644 unsigned long redzone1, redzone2;
2645
2646 redzone1 = *dbg_redzone1(cache, obj);
2647 redzone2 = *dbg_redzone2(cache, obj);
2648
2649 /*
2650 * Redzone is ok.
2651 */
2652 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2653 return;
2654
2655 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2656 slab_error(cache, "double free detected");
2657 else
2658 slab_error(cache, "memory outside object was overwritten");
2659
2660 printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
2661 obj, redzone1, redzone2);
2662}
2663
Pekka Enberg343e0d72006-02-01 03:05:50 -08002664static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002665 void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666{
2667 struct page *page;
2668 unsigned int objnr;
2669 struct slab *slabp;
2670
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002671 objp -= obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 kfree_debugcheck(objp);
2673 page = virt_to_page(objp);
2674
Pekka Enberg065d41c2005-11-13 16:06:46 -08002675 slabp = page_get_slab(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
2677 if (cachep->flags & SLAB_RED_ZONE) {
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002678 verify_redzone_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2680 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2681 }
2682 if (cachep->flags & SLAB_STORE_USER)
2683 *dbg_userword(cachep, objp) = caller;
2684
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002685 objnr = obj_to_index(cachep, slabp, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
2687 BUG_ON(objnr >= cachep->num);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08002688 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
2690 if (cachep->flags & SLAB_DEBUG_INITIAL) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002691 /*
2692 * Need to call the slab's constructor so the caller can
2693 * perform a verify of its state (debugging). Called without
2694 * the cache-lock held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 */
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002696 cachep->ctor(objp + obj_offset(cachep),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002697 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 }
2699 if (cachep->flags & SLAB_POISON && cachep->dtor) {
2700 /* we want to cache poison the object,
2701 * call the destruction callback
2702 */
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002703 cachep->dtor(objp + obj_offset(cachep), cachep, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 }
Al Viro871751e2006-03-25 03:06:39 -08002705#ifdef CONFIG_DEBUG_SLAB_LEAK
2706 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2707#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 if (cachep->flags & SLAB_POISON) {
2709#ifdef CONFIG_DEBUG_PAGEALLOC
Andrew Mortona737b3e2006-03-22 00:08:11 -08002710 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 store_stackinfo(cachep, objp, (unsigned long)caller);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002712 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002713 cachep->buffer_size / PAGE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 } else {
2715 poison_obj(cachep, objp, POISON_FREE);
2716 }
2717#else
2718 poison_obj(cachep, objp, POISON_FREE);
2719#endif
2720 }
2721 return objp;
2722}
2723
Pekka Enberg343e0d72006-02-01 03:05:50 -08002724static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725{
2726 kmem_bufctl_t i;
2727 int entries = 0;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002728
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 /* Check slab's freelist to see if this obj is there. */
2730 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2731 entries++;
2732 if (entries > cachep->num || i >= cachep->num)
2733 goto bad;
2734 }
2735 if (entries != cachep->num - slabp->inuse) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002736bad:
2737 printk(KERN_ERR "slab: Internal list corruption detected in "
2738 "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2739 cachep->name, cachep->num, slabp, slabp->inuse);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002740 for (i = 0;
Linus Torvalds264132b2006-03-06 12:10:07 -08002741 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002742 i++) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002743 if (i % 16 == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 printk("\n%03x:", i);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002745 printk(" %02x", ((unsigned char *)slabp)[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 }
2747 printk("\n");
2748 BUG();
2749 }
2750}
2751#else
2752#define kfree_debugcheck(x) do { } while(0)
2753#define cache_free_debugcheck(x,objp,z) (objp)
2754#define check_slabp(x,y) do { } while(0)
2755#endif
2756
Pekka Enberg343e0d72006-02-01 03:05:50 -08002757static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758{
2759 int batchcount;
2760 struct kmem_list3 *l3;
2761 struct array_cache *ac;
2762
2763 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002764 ac = cpu_cache_get(cachep);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002765retry:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 batchcount = ac->batchcount;
2767 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002768 /*
2769 * If there was little recent activity on this cache, then
2770 * perform only a partial refill. Otherwise we could generate
2771 * refill bouncing.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 */
2773 batchcount = BATCHREFILL_LIMIT;
2774 }
Christoph Lametere498be72005-09-09 13:03:32 -07002775 l3 = cachep->nodelists[numa_node_id()];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
Christoph Lametere498be72005-09-09 13:03:32 -07002777 BUG_ON(ac->avail > 0 || !l3);
2778 spin_lock(&l3->list_lock);
2779
Christoph Lameter3ded1752006-03-25 03:06:44 -08002780 /* See if we can refill from the shared array */
2781 if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2782 goto alloc_done;
2783
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 while (batchcount > 0) {
2785 struct list_head *entry;
2786 struct slab *slabp;
 2787 /* Get the slab the alloc is to come from. */
2788 entry = l3->slabs_partial.next;
2789 if (entry == &l3->slabs_partial) {
2790 l3->free_touched = 1;
2791 entry = l3->slabs_free.next;
2792 if (entry == &l3->slabs_free)
2793 goto must_grow;
2794 }
2795
2796 slabp = list_entry(entry, struct slab, list);
2797 check_slabp(cachep, slabp);
2798 check_spinlock_acquired(cachep);
2799 while (slabp->inuse < cachep->num && batchcount--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 STATS_INC_ALLOCED(cachep);
2801 STATS_INC_ACTIVE(cachep);
2802 STATS_SET_HIGH(cachep);
2803
Matthew Dobson78d382d2006-02-01 03:05:47 -08002804 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
2805 numa_node_id());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 }
2807 check_slabp(cachep, slabp);
2808
2809 /* move slabp to correct slabp list: */
2810 list_del(&slabp->list);
2811 if (slabp->free == BUFCTL_END)
2812 list_add(&slabp->list, &l3->slabs_full);
2813 else
2814 list_add(&slabp->list, &l3->slabs_partial);
2815 }
2816
Andrew Mortona737b3e2006-03-22 00:08:11 -08002817must_grow:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 l3->free_objects -= ac->avail;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002819alloc_done:
Christoph Lametere498be72005-09-09 13:03:32 -07002820 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821
2822 if (unlikely(!ac->avail)) {
2823 int x;
Christoph Lametere498be72005-09-09 13:03:32 -07002824 x = cache_grow(cachep, flags, numa_node_id());
2825
Andrew Mortona737b3e2006-03-22 00:08:11 -08002826 /* cache_grow can reenable interrupts, then ac could change. */
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002827 ac = cpu_cache_get(cachep);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002828 if (!x && ac->avail == 0) /* no objects in sight? abort */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 return NULL;
2830
Andrew Mortona737b3e2006-03-22 00:08:11 -08002831 if (!ac->avail) /* objects refilled by interrupt? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 goto retry;
2833 }
2834 ac->touched = 1;
Christoph Lametere498be72005-09-09 13:03:32 -07002835 return ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836}
2837
Andrew Mortona737b3e2006-03-22 00:08:11 -08002838static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2839 gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840{
2841 might_sleep_if(flags & __GFP_WAIT);
2842#if DEBUG
2843 kmem_flagcheck(cachep, flags);
2844#endif
2845}
2846
2847#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08002848static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2849 gfp_t flags, void *objp, void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002851 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 return objp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002853 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854#ifdef CONFIG_DEBUG_PAGEALLOC
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002855 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002856 kernel_map_pages(virt_to_page(objp),
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002857 cachep->buffer_size / PAGE_SIZE, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 else
2859 check_poison_obj(cachep, objp);
2860#else
2861 check_poison_obj(cachep, objp);
2862#endif
2863 poison_obj(cachep, objp, POISON_INUSE);
2864 }
2865 if (cachep->flags & SLAB_STORE_USER)
2866 *dbg_userword(cachep, objp) = caller;
2867
2868 if (cachep->flags & SLAB_RED_ZONE) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002869 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2870 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2871 slab_error(cachep, "double free, or memory outside"
2872 " object was overwritten");
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002873 printk(KERN_ERR
Andrew Mortona737b3e2006-03-22 00:08:11 -08002874 "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
2875 objp, *dbg_redzone1(cachep, objp),
2876 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 }
2878 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2879 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2880 }
Al Viro871751e2006-03-25 03:06:39 -08002881#ifdef CONFIG_DEBUG_SLAB_LEAK
2882 {
2883 struct slab *slabp;
2884 unsigned objnr;
2885
2886 slabp = page_get_slab(virt_to_page(objp));
2887 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
2888 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
2889 }
2890#endif
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002891 objp += obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 if (cachep->ctor && cachep->flags & SLAB_POISON) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002893 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894
2895 if (!(flags & __GFP_WAIT))
2896 ctor_flags |= SLAB_CTOR_ATOMIC;
2897
2898 cachep->ctor(objp, cachep, ctor_flags);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 return objp;
2901}
2902#else
2903#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2904#endif
2905
Pekka Enberg343e0d72006-02-01 03:05:50 -08002906static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002908 void *objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 struct array_cache *ac;
2910
Christoph Lameterdc85da12006-01-18 17:42:36 -08002911#ifdef CONFIG_NUMA
Paul Jacksonb2455392006-03-24 03:16:12 -08002912 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
Paul Jacksonc61afb12006-03-24 03:16:08 -08002913 objp = alternate_node_alloc(cachep, flags);
2914 if (objp != NULL)
2915 return objp;
Paul Jackson101a5002006-03-24 03:16:07 -08002916 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08002917#endif
2918
Alok N Kataria5c382302005-09-27 21:45:46 -07002919 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002920 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 if (likely(ac->avail)) {
2922 STATS_INC_ALLOCHIT(cachep);
2923 ac->touched = 1;
Christoph Lametere498be72005-09-09 13:03:32 -07002924 objp = ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 } else {
2926 STATS_INC_ALLOCMISS(cachep);
2927 objp = cache_alloc_refill(cachep, flags);
2928 }
Alok N Kataria5c382302005-09-27 21:45:46 -07002929 return objp;
2930}
2931
Andrew Mortona737b3e2006-03-22 00:08:11 -08002932static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
2933 gfp_t flags, void *caller)
Alok N Kataria5c382302005-09-27 21:45:46 -07002934{
2935 unsigned long save_flags;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002936 void *objp;
Alok N Kataria5c382302005-09-27 21:45:46 -07002937
2938 cache_alloc_debugcheck_before(cachep, flags);
2939
2940 local_irq_save(save_flags);
2941 objp = ____cache_alloc(cachep, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942 local_irq_restore(save_flags);
Eric Dumazet34342e82005-09-03 15:55:06 -07002943 objp = cache_alloc_debugcheck_after(cachep, flags, objp,
Pekka Enberg7fd6b142006-02-01 03:05:52 -08002944 caller);
Eric Dumazet34342e82005-09-03 15:55:06 -07002945 prefetchw(objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 return objp;
2947}
2948
Christoph Lametere498be72005-09-09 13:03:32 -07002949#ifdef CONFIG_NUMA
2950/*
Paul Jacksonb2455392006-03-24 03:16:12 -08002951 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
Paul Jacksonc61afb12006-03-24 03:16:08 -08002952 *
2953 * If we are in_interrupt, then process context, including cpusets and
2954 * mempolicy, may not apply and should not be used for allocation policy.
2955 */
2956static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2957{
2958 int nid_alloc, nid_here;
2959
2960 if (in_interrupt())
2961 return NULL;
2962 nid_alloc = nid_here = numa_node_id();
2963 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2964 nid_alloc = cpuset_mem_spread_node();
2965 else if (current->mempolicy)
2966 nid_alloc = slab_node(current->mempolicy);
2967 if (nid_alloc != nid_here)
2968 return __cache_alloc_node(cachep, flags, nid_alloc);
2969 return NULL;
2970}
2971
2972/*
Christoph Lametere498be72005-09-09 13:03:32 -07002973 * An interface to enable slab creation on nodeid
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08002975static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
2976 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07002977{
2978 struct list_head *entry;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002979 struct slab *slabp;
2980 struct kmem_list3 *l3;
2981 void *obj;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002982 int x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002984 l3 = cachep->nodelists[nodeid];
2985 BUG_ON(!l3);
Christoph Lametere498be72005-09-09 13:03:32 -07002986
Andrew Mortona737b3e2006-03-22 00:08:11 -08002987retry:
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08002988 check_irq_off();
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002989 spin_lock(&l3->list_lock);
2990 entry = l3->slabs_partial.next;
2991 if (entry == &l3->slabs_partial) {
2992 l3->free_touched = 1;
2993 entry = l3->slabs_free.next;
2994 if (entry == &l3->slabs_free)
2995 goto must_grow;
2996 }
Christoph Lametere498be72005-09-09 13:03:32 -07002997
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002998 slabp = list_entry(entry, struct slab, list);
2999 check_spinlock_acquired_node(cachep, nodeid);
3000 check_slabp(cachep, slabp);
Christoph Lametere498be72005-09-09 13:03:32 -07003001
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003002 STATS_INC_NODEALLOCS(cachep);
3003 STATS_INC_ACTIVE(cachep);
3004 STATS_SET_HIGH(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003005
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003006 BUG_ON(slabp->inuse == cachep->num);
Christoph Lametere498be72005-09-09 13:03:32 -07003007
Matthew Dobson78d382d2006-02-01 03:05:47 -08003008 obj = slab_get_obj(cachep, slabp, nodeid);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003009 check_slabp(cachep, slabp);
3010 l3->free_objects--;
3011 /* move slabp to correct slabp list: */
3012 list_del(&slabp->list);
Christoph Lametere498be72005-09-09 13:03:32 -07003013
Andrew Mortona737b3e2006-03-22 00:08:11 -08003014 if (slabp->free == BUFCTL_END)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003015 list_add(&slabp->list, &l3->slabs_full);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003016 else
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003017 list_add(&slabp->list, &l3->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07003018
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003019 spin_unlock(&l3->list_lock);
3020 goto done;
Christoph Lametere498be72005-09-09 13:03:32 -07003021
Andrew Mortona737b3e2006-03-22 00:08:11 -08003022must_grow:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003023 spin_unlock(&l3->list_lock);
3024 x = cache_grow(cachep, flags, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07003025
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003026 if (!x)
3027 return NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003028
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003029 goto retry;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003030done:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003031 return obj;
Christoph Lametere498be72005-09-09 13:03:32 -07003032}
3033#endif
3034
3035/*
 3036 * Caller needs to acquire the correct kmem_list3's list_lock
3037 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003038static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003039 int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040{
3041 int i;
Christoph Lametere498be72005-09-09 13:03:32 -07003042 struct kmem_list3 *l3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043
3044 for (i = 0; i < nr_objects; i++) {
3045 void *objp = objpp[i];
3046 struct slab *slabp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08003048 slabp = virt_to_slab(objp);
Christoph Lameterff694162005-09-22 21:44:02 -07003049 l3 = cachep->nodelists[node];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 list_del(&slabp->list);
Christoph Lameterff694162005-09-22 21:44:02 -07003051 check_spinlock_acquired_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 check_slabp(cachep, slabp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08003053 slab_put_obj(cachep, slabp, objp, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 STATS_DEC_ACTIVE(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003055 l3->free_objects++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 check_slabp(cachep, slabp);
3057
3058 /* fixup slab chains */
3059 if (slabp->inuse == 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07003060 if (l3->free_objects > l3->free_limit) {
3061 l3->free_objects -= cachep->num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062 slab_destroy(cachep, slabp);
3063 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07003064 list_add(&slabp->list, &l3->slabs_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 }
3066 } else {
3067 /* Unconditionally move a slab to the end of the
3068 * partial list on free - maximum time for the
3069 * other objects to be freed, too.
3070 */
Christoph Lametere498be72005-09-09 13:03:32 -07003071 list_add_tail(&slabp->list, &l3->slabs_partial);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 }
3073 }
3074}
3075
Pekka Enberg343e0d72006-02-01 03:05:50 -08003076static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077{
3078 int batchcount;
Christoph Lametere498be72005-09-09 13:03:32 -07003079 struct kmem_list3 *l3;
Christoph Lameterff694162005-09-22 21:44:02 -07003080 int node = numa_node_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081
3082 batchcount = ac->batchcount;
3083#if DEBUG
3084 BUG_ON(!batchcount || batchcount > ac->avail);
3085#endif
3086 check_irq_off();
Christoph Lameterff694162005-09-22 21:44:02 -07003087 l3 = cachep->nodelists[node];
Christoph Lametere498be72005-09-09 13:03:32 -07003088 spin_lock(&l3->list_lock);
3089 if (l3->shared) {
3090 struct array_cache *shared_array = l3->shared;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003091 int max = shared_array->limit - shared_array->avail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 if (max) {
3093 if (batchcount > max)
3094 batchcount = max;
Christoph Lametere498be72005-09-09 13:03:32 -07003095 memcpy(&(shared_array->entry[shared_array->avail]),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003096 ac->entry, sizeof(void *) * batchcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097 shared_array->avail += batchcount;
3098 goto free_done;
3099 }
3100 }
3101
Christoph Lameterff694162005-09-22 21:44:02 -07003102 free_block(cachep, ac->entry, batchcount, node);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003103free_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104#if STATS
3105 {
3106 int i = 0;
3107 struct list_head *p;
3108
Christoph Lametere498be72005-09-09 13:03:32 -07003109 p = l3->slabs_free.next;
3110 while (p != &(l3->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 struct slab *slabp;
3112
3113 slabp = list_entry(p, struct slab, list);
3114 BUG_ON(slabp->inuse);
3115
3116 i++;
3117 p = p->next;
3118 }
3119 STATS_SET_FREEABLE(cachep, i);
3120 }
3121#endif
Christoph Lametere498be72005-09-09 13:03:32 -07003122 spin_unlock(&l3->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123 ac->avail -= batchcount;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003124 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125}
3126
3127/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08003128 * Release an obj back to its cache. If the obj has a constructed state, it must
3129 * be in this state _before_ it is released. Called with disabled ints.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003131static inline void __cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132{
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003133 struct array_cache *ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134
3135 check_irq_off();
3136 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3137
Pekka Enberg729bd0b2006-06-23 02:03:05 -07003138 if (cache_free_alien(cachep, objp))
3139 return;
Christoph Lametere498be72005-09-09 13:03:32 -07003140
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141 if (likely(ac->avail < ac->limit)) {
3142 STATS_INC_FREEHIT(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003143 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 return;
3145 } else {
3146 STATS_INC_FREEMISS(cachep);
3147 cache_flusharray(cachep, ac);
Christoph Lametere498be72005-09-09 13:03:32 -07003148 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149 }
3150}
3151
3152/**
3153 * kmem_cache_alloc - Allocate an object
3154 * @cachep: The cache to allocate from.
3155 * @flags: See kmalloc().
3156 *
3157 * Allocate an object from this cache. The flags are only relevant
3158 * if the cache has no available objects.
3159 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003160void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161{
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003162 return __cache_alloc(cachep, flags, __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163}
3164EXPORT_SYMBOL(kmem_cache_alloc);
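
/*
 * Illustrative usage sketch (added, not part of the original source): a
 * typical create/alloc/free cycle. 'struct foo' and 'foo_cache' are
 * hypothetical; the kmem_cache_create() call follows the six-argument form
 * (ctor and dtor pointers) used at this point in the tree.
 */
#if 0
static struct kmem_cache *foo_cache;

static int foo_example(void)
{
	struct foo *f;

	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, 0, NULL, NULL);
	if (!foo_cache)
		return -ENOMEM;

	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);	/* may sleep */
	if (!f)
		return -ENOMEM;

	kmem_cache_free(foo_cache, f);
	return 0;
}
#endif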
3165
3166/**
Pekka Enberga8c0f9a2006-03-25 03:06:42 -08003167 * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3168 * @cache: The cache to allocate from.
3169 * @flags: See kmalloc().
3170 *
3171 * Allocate an object from this cache and set the allocated memory to zero.
3172 * The flags are only relevant if the cache has no available objects.
3173 */
3174void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
3175{
3176 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
3177 if (ret)
3178 memset(ret, 0, obj_size(cache));
3179 return ret;
3180}
3181EXPORT_SYMBOL(kmem_cache_zalloc);
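
/*
 * Illustrative sketch (added): kmem_cache_zalloc() behaves like
 * kmem_cache_alloc() followed by memset(ret, 0, obj_size(cache)), as
 * implemented above, so callers can skip the explicit clear. 'struct foo'
 * is hypothetical.
 */
#if 0
static struct foo *foo_alloc_zeroed(struct kmem_cache *foo_cache)
{
	/* Every field of the returned object is already zero. */
	return kmem_cache_zalloc(foo_cache, GFP_KERNEL);
}
#endif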
3182
3183/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 * kmem_ptr_validate - check if an untrusted pointer might
3185 * be a slab entry.
3186 * @cachep: the cache we're checking against
3187 * @ptr: pointer to validate
3188 *
3189 * This verifies that the untrusted pointer looks sane:
3190 * it is _not_ a guarantee that the pointer is actually
3191 * part of the slab cache in question, but it at least
3192 * validates that the pointer can be dereferenced and
3193 * looks half-way sane.
3194 *
3195 * Currently only used for dentry validation.
3196 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003197int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003199 unsigned long addr = (unsigned long)ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200 unsigned long min_addr = PAGE_OFFSET;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003201 unsigned long align_mask = BYTES_PER_WORD - 1;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003202 unsigned long size = cachep->buffer_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 struct page *page;
3204
3205 if (unlikely(addr < min_addr))
3206 goto out;
3207 if (unlikely(addr > (unsigned long)high_memory - size))
3208 goto out;
3209 if (unlikely(addr & align_mask))
3210 goto out;
3211 if (unlikely(!kern_addr_valid(addr)))
3212 goto out;
3213 if (unlikely(!kern_addr_valid(addr + size - 1)))
3214 goto out;
3215 page = virt_to_page(ptr);
3216 if (unlikely(!PageSlab(page)))
3217 goto out;
Pekka Enberg065d41c2005-11-13 16:06:46 -08003218 if (unlikely(page_get_cache(page) != cachep))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 goto out;
3220 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003221out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 return 0;
3223}
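
/*
 * Illustrative sketch (added): how a caller such as the dcache can use
 * kmem_ptr_validate() to sanity-check a possibly-stale pointer before
 * touching it. The helper name is hypothetical.
 */
#if 0
static int example_pointer_plausible(struct kmem_cache *cachep, void *candidate)
{
	if (!kmem_ptr_validate(cachep, candidate))
		return 0;	/* definitely not a valid object of this cache */

	/*
	 * The pointer is dereferenceable, word-aligned and sits in a slab
	 * page owned by 'cachep'; it is still not guaranteed to be live.
	 */
	return 1;
}
#endif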
3224
3225#ifdef CONFIG_NUMA
3226/**
3227 * kmem_cache_alloc_node - Allocate an object on the specified node
3228 * @cachep: The cache to allocate from.
3229 * @flags: See kmalloc().
3230 * @nodeid: node number of the target node.
3231 *
 3232 * Identical to kmem_cache_alloc, except that this function is slow
 3233 * and can sleep, and it will allocate memory on the given node, which
 3234 * can improve performance for cpu-bound structures.
Christoph Lametere498be72005-09-09 13:03:32 -07003235 * New and improved: it will now make sure that the object gets
3236 * put on the correct node list so that there is no false sharing.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003238void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239{
Christoph Lametere498be72005-09-09 13:03:32 -07003240 unsigned long save_flags;
3241 void *ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
Christoph Lametere498be72005-09-09 13:03:32 -07003243 cache_alloc_debugcheck_before(cachep, flags);
3244 local_irq_save(save_flags);
Christoph Lameter18f820f2006-02-01 03:05:43 -08003245
3246 if (nodeid == -1 || nodeid == numa_node_id() ||
Andrew Mortona737b3e2006-03-22 00:08:11 -08003247 !cachep->nodelists[nodeid])
Alok N Kataria5c382302005-09-27 21:45:46 -07003248 ptr = ____cache_alloc(cachep, flags);
3249 else
3250 ptr = __cache_alloc_node(cachep, flags, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07003251 local_irq_restore(save_flags);
Christoph Lameter18f820f2006-02-01 03:05:43 -08003252
3253 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
3254 __builtin_return_address(0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255
Christoph Lametere498be72005-09-09 13:03:32 -07003256 return ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257}
3258EXPORT_SYMBOL(kmem_cache_alloc_node);
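
/*
 * Illustrative sketch (added): allocating a per-node control structure on
 * the node that will mostly access it. 'bar_cache' is hypothetical; passing
 * -1 (or the local node) falls back to the normal fast path, as shown in
 * the function above.
 */
#if 0
static void *bar_alloc_for_node(struct kmem_cache *bar_cache, int node)
{
	return kmem_cache_alloc_node(bar_cache, GFP_KERNEL, node);
}
#endif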
3259
Al Virodd0fc662005-10-07 07:46:04 +01003260void *kmalloc_node(size_t size, gfp_t flags, int node)
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003261{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003262 struct kmem_cache *cachep;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003263
3264 cachep = kmem_find_general_cachep(size, flags);
3265 if (unlikely(cachep == NULL))
3266 return NULL;
3267 return kmem_cache_alloc_node(cachep, flags, node);
3268}
3269EXPORT_SYMBOL(kmalloc_node);
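
/*
 * Illustrative sketch (added): kmalloc_node() resolves the general cache
 * for 'size' and then allocates on the requested node; callers without a
 * node preference can simply use kmalloc(). The helper name is hypothetical.
 */
#if 0
static void *example_node_buffer(size_t size, int node)
{
	if (node < 0 || !node_online(node))
		return kmalloc(size, GFP_KERNEL);
	return kmalloc_node(size, GFP_KERNEL, node);
}
#endif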
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270#endif
3271
3272/**
Paul Drynoff800590f2006-06-23 02:03:48 -07003273 * __do_kmalloc - allocate memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 * @size: how many bytes of memory are required.
Paul Drynoff800590f2006-06-23 02:03:48 -07003275 * @flags: the type of memory to allocate (see kmalloc).
Randy Dunlap911851e2006-03-22 00:08:14 -08003276 * @caller: function caller for debug tracking of the caller
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 */
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003278static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3279 void *caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003281 struct kmem_cache *cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003283 /* If you want to save a few bytes .text space: replace
3284 * __ with kmem_.
3285 * Then kmalloc uses the uninlined functions instead of the inline
3286 * functions.
3287 */
3288 cachep = __find_general_cachep(size, flags);
Andrew Mortondbdb9042005-09-23 13:24:10 -07003289 if (unlikely(cachep == NULL))
3290 return NULL;
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003291 return __cache_alloc(cachep, flags, caller);
3292}
3293
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003294
3295void *__kmalloc(size_t size, gfp_t flags)
3296{
Al Viro871751e2006-03-25 03:06:39 -08003297#ifndef CONFIG_DEBUG_SLAB
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003298 return __do_kmalloc(size, flags, NULL);
Al Viro871751e2006-03-25 03:06:39 -08003299#else
3300 return __do_kmalloc(size, flags, __builtin_return_address(0));
3301#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302}
3303EXPORT_SYMBOL(__kmalloc);
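
/*
 * Illustrative sketch (added): kmalloc() in <linux/slab.h> ends up in
 * __kmalloc() for non-constant sizes; with CONFIG_DEBUG_SLAB the caller
 * address recorded above lets debugging attribute the allocation. The
 * helper below is hypothetical.
 */
#if 0
static char *example_copy_string(const char *src, size_t len)
{
	char *buf = kmalloc(len + 1, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, src, len);
	buf[len] = '\0';
	return buf;
}
#endif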
3304
Al Viro871751e2006-03-25 03:06:39 -08003305#ifdef CONFIG_DEBUG_SLAB
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003306void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3307{
3308 return __do_kmalloc(size, flags, caller);
3309}
3310EXPORT_SYMBOL(__kmalloc_track_caller);
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003311#endif
3312
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313#ifdef CONFIG_SMP
3314/**
3315 * __alloc_percpu - allocate one copy of the object for every present
3316 * cpu in the system, zeroing them.
3317 * Objects should be dereferenced using the per_cpu_ptr macro only.
3318 *
3319 * @size: how many bytes of memory are required.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 */
Pekka Enbergf9f75002006-01-08 01:00:33 -08003321void *__alloc_percpu(size_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322{
3323 int i;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003324 struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325
3326 if (!pdata)
3327 return NULL;
3328
Christoph Lametere498be72005-09-09 13:03:32 -07003329 /*
3330 * Cannot use for_each_online_cpu since a cpu may come online
3331 * and we have no way of figuring out how to fix the array
3332 * that we have allocated then....
3333 */
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003334 for_each_possible_cpu(i) {
Christoph Lametere498be72005-09-09 13:03:32 -07003335 int node = cpu_to_node(i);
3336
3337 if (node_online(node))
3338 pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
3339 else
3340 pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341
3342 if (!pdata->ptrs[i])
3343 goto unwind_oom;
3344 memset(pdata->ptrs[i], 0, size);
3345 }
3346
3347 /* Catch derefs w/o wrappers */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003348 return (void *)(~(unsigned long)pdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
Andrew Mortona737b3e2006-03-22 00:08:11 -08003350unwind_oom:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 while (--i >= 0) {
3352 if (!cpu_possible(i))
3353 continue;
3354 kfree(pdata->ptrs[i]);
3355 }
3356 kfree(pdata);
3357 return NULL;
3358}
3359EXPORT_SYMBOL(__alloc_percpu);
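
/*
 * Illustrative sketch (added): per-cpu data returned by __alloc_percpu()
 * (a complemented pointer to a struct percpu_data) must only be
 * dereferenced through per_cpu_ptr(). Names below are hypothetical.
 */
#if 0
static long *example_counters;	/* complemented pointer from __alloc_percpu() */

static int example_percpu_init(void)
{
	int cpu;

	example_counters = __alloc_percpu(sizeof(long));
	if (!example_counters)
		return -ENOMEM;

	cpu = get_cpu();				/* pin to this cpu */
	(*per_cpu_ptr(example_counters, cpu))++;	/* touch only this cpu's copy */
	put_cpu();
	return 0;
}
#endif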
3360#endif
3361
3362/**
3363 * kmem_cache_free - Deallocate an object
3364 * @cachep: The cache the allocation was from.
3365 * @objp: The previously allocated object.
3366 *
3367 * Free an object which was previously allocated from this
3368 * cache.
3369 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003370void kmem_cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371{
3372 unsigned long flags;
3373
Pekka Enbergddc2e812006-06-23 02:03:40 -07003374 BUG_ON(virt_to_cache(objp) != cachep);
3375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 local_irq_save(flags);
3377 __cache_free(cachep, objp);
3378 local_irq_restore(flags);
3379}
3380EXPORT_SYMBOL(kmem_cache_free);
3381
3382/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383 * kfree - free previously allocated memory
3384 * @objp: pointer returned by kmalloc.
3385 *
Pekka Enberg80e93ef2005-09-09 13:10:16 -07003386 * If @objp is NULL, no operation is performed.
3387 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 * Don't free memory not originally allocated by kmalloc()
3389 * or you will run into trouble.
3390 */
3391void kfree(const void *objp)
3392{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003393 struct kmem_cache *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 unsigned long flags;
3395
3396 if (unlikely(!objp))
3397 return;
3398 local_irq_save(flags);
3399 kfree_debugcheck(objp);
Pekka Enberg6ed5eb22006-02-01 03:05:49 -08003400 c = virt_to_cache(objp);
Ingo Molnarf9b84042006-06-27 02:54:49 -07003401 debug_check_no_locks_freed(objp, obj_size(c));
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003402 __cache_free(c, (void *)objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 local_irq_restore(flags);
3404}
3405EXPORT_SYMBOL(kfree);
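
/*
 * Illustrative sketch (added): kfree(NULL) is a no-op, so error paths may
 * free unconditionally. Names below are hypothetical.
 */
#if 0
static int example_error_path(void)
{
	char *a = kmalloc(64, GFP_KERNEL);
	char *b = kmalloc(128, GFP_KERNEL);

	if (!a || !b) {
		kfree(a);	/* safe even if the pointer is NULL */
		kfree(b);
		return -ENOMEM;
	}
	/* ... use a and b ... */
	kfree(a);
	kfree(b);
	return 0;
}
#endif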
3406
3407#ifdef CONFIG_SMP
3408/**
3409 * free_percpu - free previously allocated percpu memory
3410 * @objp: pointer returned by alloc_percpu.
3411 *
 3412 * Don't free memory not originally allocated by alloc_percpu().
3413 * The complemented objp is to check for that.
3414 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003415void free_percpu(const void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416{
3417 int i;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003418 struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
Christoph Lametere498be72005-09-09 13:03:32 -07003420 /*
 3421 * We allocate for all cpus, so we cannot use for_each_online_cpu() here.
3422 */
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003423 for_each_possible_cpu(i)
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003424 kfree(p->ptrs[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425 kfree(p);
3426}
3427EXPORT_SYMBOL(free_percpu);
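
/*
 * Illustrative sketch (added): free_percpu() takes the same complemented
 * pointer that __alloc_percpu() returned and releases every cpu's copy.
 * 'example_counters' refers to the hypothetical pointer from the
 * __alloc_percpu() sketch above.
 */
#if 0
static void example_percpu_exit(void)
{
	free_percpu(example_counters);
	example_counters = NULL;
}
#endif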
3428#endif
3429
Pekka Enberg343e0d72006-02-01 03:05:50 -08003430unsigned int kmem_cache_size(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431{
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003432 return obj_size(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433}
3434EXPORT_SYMBOL(kmem_cache_size);
3435
Pekka Enberg343e0d72006-02-01 03:05:50 -08003436const char *kmem_cache_name(struct kmem_cache *cachep)
Arnaldo Carvalho de Melo19449722005-06-18 22:46:19 -07003437{
3438 return cachep->name;
3439}
3440EXPORT_SYMBOL_GPL(kmem_cache_name);
3441
Christoph Lametere498be72005-09-09 13:03:32 -07003442/*
Christoph Lameter0718dc22006-03-25 03:06:47 -08003443 * This initializes kmem_list3 or resizes various caches for all nodes.
Christoph Lametere498be72005-09-09 13:03:32 -07003444 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003445static int alloc_kmemlist(struct kmem_cache *cachep)
Christoph Lametere498be72005-09-09 13:03:32 -07003446{
3447 int node;
3448 struct kmem_list3 *l3;
Christoph Lametercafeb022006-03-25 03:06:46 -08003449 struct array_cache *new_shared;
3450 struct array_cache **new_alien;
Christoph Lametere498be72005-09-09 13:03:32 -07003451
3452 for_each_online_node(node) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003453
Andrew Mortona737b3e2006-03-22 00:08:11 -08003454 new_alien = alloc_alien_cache(node, cachep->limit);
3455 if (!new_alien)
Christoph Lametere498be72005-09-09 13:03:32 -07003456 goto fail;
Christoph Lametercafeb022006-03-25 03:06:46 -08003457
Christoph Lameter0718dc22006-03-25 03:06:47 -08003458 new_shared = alloc_arraycache(node,
3459 cachep->shared*cachep->batchcount,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003460 0xbaadf00d);
Christoph Lameter0718dc22006-03-25 03:06:47 -08003461 if (!new_shared) {
3462 free_alien_cache(new_alien);
Christoph Lametere498be72005-09-09 13:03:32 -07003463 goto fail;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003464 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003465
Andrew Mortona737b3e2006-03-22 00:08:11 -08003466 l3 = cachep->nodelists[node];
3467 if (l3) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003468 struct array_cache *shared = l3->shared;
3469
Christoph Lametere498be72005-09-09 13:03:32 -07003470 spin_lock_irq(&l3->list_lock);
3471
Christoph Lametercafeb022006-03-25 03:06:46 -08003472 if (shared)
Christoph Lameter0718dc22006-03-25 03:06:47 -08003473 free_block(cachep, shared->entry,
3474 shared->avail, node);
Christoph Lametere498be72005-09-09 13:03:32 -07003475
Christoph Lametercafeb022006-03-25 03:06:46 -08003476 l3->shared = new_shared;
3477 if (!l3->alien) {
Christoph Lametere498be72005-09-09 13:03:32 -07003478 l3->alien = new_alien;
3479 new_alien = NULL;
3480 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003481 l3->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003482 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003483 spin_unlock_irq(&l3->list_lock);
Christoph Lametercafeb022006-03-25 03:06:46 -08003484 kfree(shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003485 free_alien_cache(new_alien);
3486 continue;
3487 }
Andrew Mortona737b3e2006-03-22 00:08:11 -08003488 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
Christoph Lameter0718dc22006-03-25 03:06:47 -08003489 if (!l3) {
3490 free_alien_cache(new_alien);
3491 kfree(new_shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003492 goto fail;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003493 }
Christoph Lametere498be72005-09-09 13:03:32 -07003494
3495 kmem_list3_init(l3);
3496 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
Andrew Mortona737b3e2006-03-22 00:08:11 -08003497 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
Christoph Lametercafeb022006-03-25 03:06:46 -08003498 l3->shared = new_shared;
Christoph Lametere498be72005-09-09 13:03:32 -07003499 l3->alien = new_alien;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003500 l3->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003501 cachep->batchcount + cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07003502 cachep->nodelists[node] = l3;
3503 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003504 return 0;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003505
Andrew Mortona737b3e2006-03-22 00:08:11 -08003506fail:
Christoph Lameter0718dc22006-03-25 03:06:47 -08003507 if (!cachep->next.next) {
3508 /* Cache is not active yet. Roll back what we did */
3509 node--;
3510 while (node >= 0) {
3511 if (cachep->nodelists[node]) {
3512 l3 = cachep->nodelists[node];
3513
3514 kfree(l3->shared);
3515 free_alien_cache(l3->alien);
3516 kfree(l3);
3517 cachep->nodelists[node] = NULL;
3518 }
3519 node--;
3520 }
3521 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003522 return -ENOMEM;
Christoph Lametere498be72005-09-09 13:03:32 -07003523}
3524
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525struct ccupdate_struct {
Pekka Enberg343e0d72006-02-01 03:05:50 -08003526 struct kmem_cache *cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 struct array_cache *new[NR_CPUS];
3528};
3529
3530static void do_ccupdate_local(void *info)
3531{
Andrew Mortona737b3e2006-03-22 00:08:11 -08003532 struct ccupdate_struct *new = info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 struct array_cache *old;
3534
3535 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003536 old = cpu_cache_get(new->cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003537
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3539 new->new[smp_processor_id()] = old;
3540}
3541
Ravikiran G Thirumalaib5d8ca72006-03-22 00:08:12 -08003542/* Always called with the cache_chain_mutex held */
Andrew Mortona737b3e2006-03-22 00:08:11 -08003543static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3544 int batchcount, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545{
3546 struct ccupdate_struct new;
Christoph Lametere498be72005-09-09 13:03:32 -07003547 int i, err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003549 memset(&new.new, 0, sizeof(new.new));
Christoph Lametere498be72005-09-09 13:03:32 -07003550 for_each_online_cpu(i) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08003551 new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
3552 batchcount);
Christoph Lametere498be72005-09-09 13:03:32 -07003553 if (!new.new[i]) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003554 for (i--; i >= 0; i--)
3555 kfree(new.new[i]);
Christoph Lametere498be72005-09-09 13:03:32 -07003556 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 }
3558 }
3559 new.cachep = cachep;
3560
Andrew Mortona07fa392006-03-22 00:08:17 -08003561 on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
Christoph Lametere498be72005-09-09 13:03:32 -07003562
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 check_irq_on();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 cachep->batchcount = batchcount;
3565 cachep->limit = limit;
Christoph Lametere498be72005-09-09 13:03:32 -07003566 cachep->shared = shared;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567
Christoph Lametere498be72005-09-09 13:03:32 -07003568 for_each_online_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 struct array_cache *ccold = new.new[i];
3570 if (!ccold)
3571 continue;
Christoph Lametere498be72005-09-09 13:03:32 -07003572 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
Christoph Lameterff694162005-09-22 21:44:02 -07003573 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
Christoph Lametere498be72005-09-09 13:03:32 -07003574 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 kfree(ccold);
3576 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577
Christoph Lametere498be72005-09-09 13:03:32 -07003578 err = alloc_kmemlist(cachep);
3579 if (err) {
3580 printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003581 cachep->name, -err);
Christoph Lametere498be72005-09-09 13:03:32 -07003582 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 return 0;
3585}
3586
Ravikiran G Thirumalaib5d8ca72006-03-22 00:08:12 -08003587/* Called with cache_chain_mutex held always */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003588static void enable_cpucache(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589{
3590 int err;
3591 int limit, shared;
3592
Andrew Mortona737b3e2006-03-22 00:08:11 -08003593 /*
3594 * The head array serves three purposes:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 * - create a LIFO ordering, i.e. return objects that are cache-warm
3596 * - reduce the number of spinlock operations.
Andrew Mortona737b3e2006-03-22 00:08:11 -08003597 * - reduce the number of linked list operations on the slab and
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 * bufctl chains: array operations are cheaper.
3599 * The numbers are guessed, we should auto-tune as described by
3600 * Bonwick.
3601 */
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003602 if (cachep->buffer_size > 131072)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 limit = 1;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003604 else if (cachep->buffer_size > PAGE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 limit = 8;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003606 else if (cachep->buffer_size > 1024)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 limit = 24;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003608 else if (cachep->buffer_size > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 limit = 54;
3610 else
3611 limit = 120;
3612
Andrew Mortona737b3e2006-03-22 00:08:11 -08003613 /*
3614 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 * allocation behaviour: Most allocs on one cpu, most free operations
3616 * on another cpu. For these cases, an efficient object passing between
3617 * cpus is necessary. This is provided by a shared array. The array
3618 * replaces Bonwick's magazine layer.
3619 * On uniprocessor, it's functionally equivalent (but less efficient)
3620 * to a larger limit. Thus disabled by default.
3621 */
3622 shared = 0;
3623#ifdef CONFIG_SMP
Manfred Spraul3dafccf2006-02-01 03:05:42 -08003624 if (cachep->buffer_size <= PAGE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 shared = 8;
3626#endif
3627
3628#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003629 /*
 3630 * With debugging enabled, large batchcounts lead to excessively long
 3631 * periods with disabled local interrupts. Limit the batchcount.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 */
3633 if (limit > 32)
3634 limit = 32;
3635#endif
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003636 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 if (err)
3638 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003639 cachep->name, -err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640}
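
/*
 * Worked example (added, assuming the heuristics above): a cache with
 * buffer_size == 512 gets limit = 54 and, on SMP, shared = 8, so
 * do_tune_cpucache() is called with batchcount = (54 + 1) / 2 = 27.
 */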
3641
Christoph Lameter1b552532006-03-22 00:09:07 -08003642/*
 3643 * Drain an array if it contains any elements, taking the l3 lock only if
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003644 * necessary. Note that the l3 listlock also protects the array_cache
3645 * if drain_array() is used on the shared array.
Christoph Lameter1b552532006-03-22 00:09:07 -08003646 */
void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
		 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&l3->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&l3->list_lock);
	}
}
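
/*
 * Worked example (illustrative, not from the original source): with
 * ac->limit == 120 and ac->avail == 90, a non-forced drain frees
 * (120 + 4) / 5 == 24 of the oldest entries and shifts the remaining 66
 * down. If only 10 entries were available, 24 would exceed ac->avail, so
 * the drain is capped at (10 + 1) / 2 == 5. A forced drain frees
 * everything.
 */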

/**
 * cache_reap - Reclaim memory from caches.
 * @unused: unused parameter
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(void *unused)
{
	struct kmem_cache *searchp;
	struct kmem_list3 *l3;
	int node = numa_node_id();

	if (!mutex_trylock(&cache_chain_mutex)) {
		/* Give up. Setup the next iteration. */
		schedule_delayed_work(&__get_cpu_var(reap_work),
				      REAPTIMEOUT_CPUC);
		return;
	}

	list_for_each_entry(searchp, &cache_chain, next) {
		struct list_head *p;
		int tofree;
		struct slab *slabp;

		check_irq_on();

		/*
		 * We only take the l3 lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		l3 = searchp->nodelists[node];

		reap_alien(searchp, l3);

		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(l3->next_reap, jiffies))
			goto next;

		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

		drain_array(searchp, l3, l3->shared, 0, node);

		if (l3->free_touched) {
			l3->free_touched = 0;
			goto next;
		}

		tofree = (l3->free_limit + 5 * searchp->num - 1) /
			(5 * searchp->num);
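		/*
		 * Illustrative example (values invented): with a per-node
		 * free_limit of 120 objects and 8 objects per slab,
		 * tofree = (120 + 40 - 1) / 40 == 3, i.e. roughly a fifth of
		 * the free limit, rounded up to whole slabs, is reclaimed
		 * per pass.
		 */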
		do {
			/*
			 * Do not lock if there are no free blocks.
			 */
			if (list_empty(&l3->slabs_free))
				break;

			spin_lock_irq(&l3->list_lock);
			p = l3->slabs_free.next;
			if (p == &(l3->slabs_free)) {
				spin_unlock_irq(&l3->list_lock);
				break;
			}

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);
			list_del(&slabp->list);
			STATS_INC_REAPED(searchp);

			/*
			 * Safe to drop the lock. The slab is no longer linked
			 * to the cache. searchp cannot disappear, we hold
			 * cache_chain_mutex.
			 */
			l3->free_objects -= searchp->num;
			spin_unlock_irq(&l3->list_lock);
			slab_destroy(searchp, slabp);
		} while (--tofree > 0);
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&cache_chain_mutex);
	next_reap_node();
	/* Set up the next iteration */
	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}

#ifdef CONFIG_PROC_FS

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#if STATS
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}
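
/*
 * Illustrative /proc/slabinfo line in the 2.1 format (all numbers invented,
 * wrapped here for readability; real output is a single line):
 *
 *   dentry_cache   20105  20655    144   27    1 : tunables  120   60    8
 *       : slabdata    765    765      0
 */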

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	if (!n)
		print_slabinfo_header(m);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct kmem_cache *cachep = p;
	++*pos;
	return cachep->next.next == &cache_chain ?
		NULL : list_entry(cachep->next.next, struct kmem_cache, next);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&cache_chain_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_list3 *l3;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list) {
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_partial, list) {
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_free, list) {
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += l3->free_objects;
		if (l3->shared)
			shared_avail += l3->shared->avail;

		spin_unlock_irq(&l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   active_slabs, num_slabs, shared_avail);
#if STATS
	{	/* list3 stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */

struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&cache_chain_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &cache_chain, next) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared);
			}
			break;
		}
	}
	mutex_unlock(&cache_chain_mutex);
	if (res >= 0)
		res = count;
	return res;
}
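
/*
 * Example usage (illustrative): the tunables of a cache are adjusted by
 * writing "<cache-name> <limit> <batchcount> <shared>" to /proc/slabinfo,
 * e.g. from a shell:
 *
 *   echo "dentry_cache 120 60 8" > /proc/slabinfo
 *
 * Note that out-of-range values (limit or batchcount below 1, batchcount
 * above limit, negative shared) leave the cache untouched but still report
 * success, since res is set to 0 above.
 */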

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

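/*
 * Note (added commentary, not in the original source): the "n" buffer used
 * by add_caller()/handle_slab() below is laid out as
 *   n[0]   = number of (caller, count) pairs the buffer can hold,
 *   n[1]   = number of pairs currently stored,
 *   n[2..] = the pairs themselves, kept sorted by caller address so that
 *            add_caller() can locate an entry by binary search.
 * add_caller() returns 0 when the buffer is full, which makes leaks_show()
 * allocate a larger buffer and retry the entry.
 */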
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
{
	void *p;
	int i;
	if (n[0] == n[1])
		return;
	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
			continue;
		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char *modname;
	const char *name;
	unsigned long offset, size;
	char namebuf[KSYM_NAME_LEN+1];

	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);

	if (name) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname)
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct slab *slabp;
	struct kmem_list3 *l3;
	const char *name;
	unsigned long *n = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	n[1] = 0;

	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list)
			handle_slab(n, cachep, slabp);
		list_for_each_entry(slabp, &l3->slabs_partial, list)
			handle_slab(n, cachep, slabp);
		spin_unlock_irq(&l3->list_lock);
	}
	name = cachep->name;
	if (n[0] == n[1]) {
		/* Increase the buffer size */
		mutex_unlock(&cache_chain_mutex);
		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = n;
			mutex_lock(&cache_chain_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = n[0] * 2;
		kfree(n);
		mutex_lock(&cache_chain_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < n[1]; i++) {
		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
		show_symbol(m, n[2*i+2]);
		seq_putc(m, '\n');
	}
	return 0;
}
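
/*
 * Illustrative output line (symbol and counts invented): each cache built
 * with SLAB_STORE_USER and SLAB_RED_ZONE gets one line per recorded
 * allocation caller, in the form "<cache-name>: <object-count> <caller>",
 * e.g.
 *
 *   size-64: 1342 seq_read+0x1f3/0x2e0
 */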

struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = s_next,
	.stop = s_stop,
	.show = leaks_show,
};
#endif
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the call.
 */
unsigned int ksize(const void *objp)
{
	if (unlikely(objp == NULL))
		return 0;

	return obj_size(virt_to_cache(objp));
}
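
/*
 * Example (illustrative; the exact size depends on the configured
 * general-purpose kmalloc caches):
 *
 *	buf = kmalloc(33, GFP_KERNEL);
 *	n = ksize(buf);		// typically 64: the allocation came from
 *				// the next larger general-purpose cache
 *
 * The extra (n - 33) bytes may legitimately be used by the caller.
 */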