/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise they come from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
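
/*
 * Illustrative sketch of the interface described above (not code used by
 * this file; "struct foo" and "foo_ctor" are hypothetical, and error
 * handling is omitted):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	objp = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	... use the object, preserving the constructor-established state ...
 *	kmem_cache_free(foo_cache, objp);
 *	kmem_cache_destroy(foo_cache);
 */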

#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/rtmutex.h>

#include	<asm/uaccess.h>
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab, using
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
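
/*
 * Rough sketch of how the bufctls are used (see the slab management code
 * later in this file): slabp->free holds the index of the first free
 * object in the slab, and the kmem_bufctl_t entry of a free object holds
 * the index of the next free one, terminated by BUFCTL_END.  Allocation
 * pops the head of this chain; freeing pushes the object's index back on.
 */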

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};
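
/*
 * A sketch of the reader-side pattern described above (names are purely
 * illustrative, not defined in this file):
 *
 *	rcu_read_lock();
 *	obj = lookup_object_without_lock();
 *	spin_lock(&obj->lock);		(memory cannot be recycled while
 *	rcu_read_unlock();		 the read-side section is held)
 *	if (object_is_still_valid(obj))
 *		... use obj ...
 *	spin_unlock(&obj->lock);
 */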

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};
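
/*
 * entry[] is used as a LIFO stack of object pointers: entry[0..avail-1]
 * hold free objects, an allocation pops entry[--avail] and a free pushes
 * the object back as entry[avail++] (see the alloc/free fast paths later
 * in this file).
 */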

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)
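
/*
 * CACHE_CACHE, SIZE_AC and SIZE_L3 are the starting indices into
 * initkmem_list3[] used during bootstrap: for cache_cache itself, for the
 * general cache that will back struct arraycache_init, and for the general
 * cache that will back struct kmem_list3 (see kmem_cache_init()).
 */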

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <=x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
/* 3) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields, the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
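
/*
 * Pictorially, with both SLAB_RED_ZONE and SLAB_STORE_USER enabled (a
 * restatement of the layout above, not an additional format):
 *
 *	[ padding ][ redzone1 ][ object (obj_size) ][ ... ][ redzone2 ][ caller ]
 *
 * where redzone1 ends at obj_offset, redzone2 starts at
 * buffer_size - 2*BYTES_PER_WORD and the caller word starts at
 * buffer_size - BYTES_PER_WORD.
 */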
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long *)(objp + obj_offset(cachep) - BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
 * order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

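/*
 * Objects are laid out contiguously from slab->s_mem, buffer_size bytes
 * apart; these two helpers convert between an object's address and its
 * index within the slab.
 */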
static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

static inline unsigned int obj_to_index(struct kmem_cache *cache,
					struct slab *slab, void *obj)
{
	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
}

/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(struct kmem_cache),
#endif
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)
{
	int q;
	struct cache_sizes *s = malloc_sizes;

	while (s->cs_size != ULONG_MAX) {
		for_each_node(q) {
			struct array_cache **alc;
			int r;
			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
			if (!l3 || OFF_SLAB(s->cs_cachep))
				continue;
			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
			alc = l3->alien;
			/*
			 * FIXME: This check for BAD_ALIEN_MAGIC
			 * should go away when common slab code is taught to
			 * work even without alien caches.
			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
			 * for alloc_alien_cache,
			 */
			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
				continue;
			for_each_node(r) {
				if (alc[r])
					lockdep_set_class(&alc[r]->lock,
							  &on_slab_alc_key);
			}
		}
		s++;
	}
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct work_struct, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}

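/*
 * Size of the on-slab management structure: struct slab followed by one
 * kmem_bufctl_t per object, padded out to @align.
 */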
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 862 | static void __slab_error(const char *function, struct kmem_cache *cachep, | 
|  | 863 | char *msg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 864 | { | 
|  | 865 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 866 | function, cachep->name, msg); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | dump_stack(); | 
|  | 868 | } | 
|  | 869 |  | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 870 | #ifdef CONFIG_NUMA | 
|  | 871 | /* | 
|  | 872 | * Special reaping functions for NUMA systems called from cache_reap(). | 
|  | 873 | * These take care of doing round robin flushing of alien caches (containing | 
|  | 874 | * objects freed on different nodes from which they were allocated) and the | 
|  | 875 | * flushing of remote pcps by calling drain_node_pages. | 
|  | 876 | */ | 
|  | 877 | static DEFINE_PER_CPU(unsigned long, reap_node); | 
|  | 878 |  | 
|  | 879 | static void init_reap_node(int cpu) | 
|  | 880 | { | 
|  | 881 | int node; | 
|  | 882 |  | 
|  | 883 | node = next_node(cpu_to_node(cpu), node_online_map); | 
|  | 884 | if (node == MAX_NUMNODES) | 
| Paul Jackson | 442295c | 2006-03-22 00:09:11 -0800 | [diff] [blame] | 885 | node = first_node(node_online_map); | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 886 |  | 
|  | 887 | __get_cpu_var(reap_node) = node; | 
|  | 888 | } | 
|  | 889 |  | 
|  | 890 | static void next_reap_node(void) | 
|  | 891 | { | 
|  | 892 | int node = __get_cpu_var(reap_node); | 
|  | 893 |  | 
|  | 894 | /* | 
|  | 895 | * Also drain per cpu pages on remote zones | 
|  | 896 | */ | 
|  | 897 | if (node != numa_node_id()) | 
|  | 898 | drain_node_pages(node); | 
|  | 899 |  | 
|  | 900 | node = next_node(node, node_online_map); | 
|  | 901 | if (unlikely(node >= MAX_NUMNODES)) | 
|  | 902 | node = first_node(node_online_map); | 
|  | 903 | __get_cpu_var(reap_node) = node; | 
|  | 904 | } | 
|  | 905 |  | 
|  | 906 | #else | 
|  | 907 | #define init_reap_node(cpu) do { } while (0) | 
|  | 908 | #define next_reap_node(void) do { } while (0) | 
|  | 909 | #endif | 
|  | 910 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | /* | 
|  | 912 | * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz | 
|  | 913 | * via the workqueue/eventd. | 
|  | 914 | * Add the CPU number into the expiration time to minimize the possibility of | 
|  | 915 | * the CPUs getting into lockstep and contending for the global cache chain | 
|  | 916 | * lock. | 
|  | 917 | */ | 
|  | 918 | static void __devinit start_cpu_timer(int cpu) | 
|  | 919 | { | 
|  | 920 | struct work_struct *reap_work = &per_cpu(reap_work, cpu); | 
|  | 921 |  | 
|  | 922 | /* | 
|  | 923 | * When this gets called from do_initcalls via cpucache_init(), | 
|  | 924 | * init_workqueues() has already run, so keventd will have been set | 
|  | 925 | * up by then. | 
|  | 926 | */ | 
|  | 927 | if (keventd_up() && reap_work->func == NULL) { | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 928 | init_reap_node(cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | INIT_WORK(reap_work, cache_reap, NULL); | 
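|  |  | /* | 
|  |  |  * Stagger the first expiry by 3 jiffies per CPU: cpu 0 fires at | 
|  |  |  * HZ, cpu 1 at HZ + 3, cpu 2 at HZ + 6, ... so the per-cpu reap | 
|  |  |  * work does not run in lockstep across CPUs. | 
|  |  |  */ | 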
|  | 930 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); | 
|  | 931 | } | 
|  | 932 | } | 
|  | 933 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 934 | static struct array_cache *alloc_arraycache(int node, int entries, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 935 | int batchcount) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 936 | { | 
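|  |  | /* | 
|  |  |  * One allocation covers both the array_cache header and the | 
|  |  |  * trailing entry[] stack of object pointers. | 
|  |  |  */ | 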
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 937 | int memsize = sizeof(void *) * entries + sizeof(struct array_cache); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 938 | struct array_cache *nc = NULL; | 
|  | 939 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 940 | nc = kmalloc_node(memsize, GFP_KERNEL, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 941 | if (nc) { | 
|  | 942 | nc->avail = 0; | 
|  | 943 | nc->limit = entries; | 
|  | 944 | nc->batchcount = batchcount; | 
|  | 945 | nc->touched = 0; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 946 | spin_lock_init(&nc->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 947 | } | 
|  | 948 | return nc; | 
|  | 949 | } | 
|  | 950 |  | 
| Christoph Lameter | 3ded175 | 2006-03-25 03:06:44 -0800 | [diff] [blame] | 951 | /* | 
|  | 952 | * Transfer objects in one arraycache to another. | 
|  | 953 | * Locking must be handled by the caller. | 
|  | 954 | * | 
|  | 955 | * Return the number of entries transferred. | 
|  | 956 | */ | 
|  | 957 | static int transfer_objects(struct array_cache *to, | 
|  | 958 | struct array_cache *from, unsigned int max) | 
|  | 959 | { | 
|  | 960 | /* Figure out how many entries to transfer */ | 
|  | 961 | int nr = min(min(from->avail, max), to->limit - to->avail); | 
|  | 962 |  | 
|  | 963 | if (!nr) | 
|  | 964 | return 0; | 
|  | 965 |  | 
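|  |  | /* | 
|  |  |  * Copy the nr most recently freed pointers from the top of | 
|  |  |  * 'from' onto the top of 'to', preserving the LIFO ordering | 
|  |  |  * of both stacks. | 
|  |  |  */ | 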
|  | 966 | memcpy(to->entry + to->avail, from->entry + from->avail - nr, | 
|  | 967 | sizeof(void *) * nr); | 
|  | 968 |  | 
|  | 969 | from->avail -= nr; | 
|  | 970 | to->avail += nr; | 
|  | 971 | to->touched = 1; | 
|  | 972 | return nr; | 
|  | 973 | } | 
|  | 974 |  | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 975 | #ifndef CONFIG_NUMA | 
|  | 976 |  | 
|  | 977 | #define drain_alien_cache(cachep, alien) do { } while (0) | 
|  | 978 | #define reap_alien(cachep, l3) do { } while (0) | 
|  | 979 |  | 
|  | 980 | static inline struct array_cache **alloc_alien_cache(int node, int limit) | 
|  | 981 | { | 
|  | 982 | return (struct array_cache **)BAD_ALIEN_MAGIC; | 
|  | 983 | } | 
|  | 984 |  | 
|  | 985 | static inline void free_alien_cache(struct array_cache **ac_ptr) | 
|  | 986 | { | 
|  | 987 | } | 
|  | 988 |  | 
|  | 989 | static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | 
|  | 990 | { | 
|  | 991 | return 0; | 
|  | 992 | } | 
|  | 993 |  | 
|  | 994 | static inline void *alternate_node_alloc(struct kmem_cache *cachep, | 
|  | 995 | gfp_t flags) | 
|  | 996 | { | 
|  | 997 | return NULL; | 
|  | 998 | } | 
|  | 999 |  | 
|  | 1000 | static inline void *__cache_alloc_node(struct kmem_cache *cachep, | 
|  | 1001 | gfp_t flags, int nodeid) | 
|  | 1002 | { | 
|  | 1003 | return NULL; | 
|  | 1004 | } | 
|  | 1005 |  | 
|  | 1006 | #else	/* CONFIG_NUMA */ | 
|  | 1007 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1008 | static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); | 
| Paul Jackson | c61afb1 | 2006-03-24 03:16:08 -0800 | [diff] [blame] | 1009 | static void *alternate_node_alloc(struct kmem_cache *, gfp_t); | 
| Christoph Lameter | dc85da1 | 2006-01-18 17:42:36 -0800 | [diff] [blame] | 1010 |  | 
| Pekka Enberg | 5295a74 | 2006-02-01 03:05:48 -0800 | [diff] [blame] | 1011 | static struct array_cache **alloc_alien_cache(int node, int limit) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1012 | { | 
|  | 1013 | struct array_cache **ac_ptr; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1014 | int memsize = sizeof(void *) * MAX_NUMNODES; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1015 | int i; | 
|  | 1016 |  | 
|  | 1017 | if (limit > 1) | 
|  | 1018 | limit = 12; | 
|  | 1019 | ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); | 
|  | 1020 | if (ac_ptr) { | 
|  | 1021 | for_each_node(i) { | 
|  | 1022 | if (i == node || !node_online(i)) { | 
|  | 1023 | ac_ptr[i] = NULL; | 
|  | 1024 | continue; | 
|  | 1025 | } | 
|  | 1026 | ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); | 
|  | 1027 | if (!ac_ptr[i]) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1028 | for (i--; i >= 0; i--) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1029 | kfree(ac_ptr[i]); | 
|  | 1030 | kfree(ac_ptr); | 
|  | 1031 | return NULL; | 
|  | 1032 | } | 
|  | 1033 | } | 
|  | 1034 | } | 
|  | 1035 | return ac_ptr; | 
|  | 1036 | } | 
|  | 1037 |  | 
| Pekka Enberg | 5295a74 | 2006-02-01 03:05:48 -0800 | [diff] [blame] | 1038 | static void free_alien_cache(struct array_cache **ac_ptr) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1039 | { | 
|  | 1040 | int i; | 
|  | 1041 |  | 
|  | 1042 | if (!ac_ptr) | 
|  | 1043 | return; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1044 | for_each_node(i) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1045 | kfree(ac_ptr[i]); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1046 | kfree(ac_ptr); | 
|  | 1047 | } | 
|  | 1048 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1049 | static void __drain_alien_cache(struct kmem_cache *cachep, | 
| Pekka Enberg | 5295a74 | 2006-02-01 03:05:48 -0800 | [diff] [blame] | 1050 | struct array_cache *ac, int node) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1051 | { | 
|  | 1052 | struct kmem_list3 *rl3 = cachep->nodelists[node]; | 
|  | 1053 |  | 
|  | 1054 | if (ac->avail) { | 
|  | 1055 | spin_lock(&rl3->list_lock); | 
| Christoph Lameter | e00946f | 2006-03-25 03:06:45 -0800 | [diff] [blame] | 1056 | /* | 
|  | 1057 | * Stuff objects into the remote node's shared array first. | 
|  | 1058 | * That way we avoid the overhead of putting the objects | 
|  | 1059 | * into the free lists and getting them back later. | 
|  | 1060 | */ | 
| shin, jacob | 693f7d3 | 2006-04-28 10:54:37 -0500 | [diff] [blame] | 1061 | if (rl3->shared) | 
|  | 1062 | transfer_objects(rl3->shared, ac, ac->limit); | 
| Christoph Lameter | e00946f | 2006-03-25 03:06:45 -0800 | [diff] [blame] | 1063 |  | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 1064 | free_block(cachep, ac->entry, ac->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1065 | ac->avail = 0; | 
|  | 1066 | spin_unlock(&rl3->list_lock); | 
|  | 1067 | } | 
|  | 1068 | } | 
|  | 1069 |  | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 1070 | /* | 
|  | 1071 | * Called from cache_reap() to regularly drain alien caches round robin. | 
|  | 1072 | */ | 
|  | 1073 | static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) | 
|  | 1074 | { | 
|  | 1075 | int node = __get_cpu_var(reap_node); | 
|  | 1076 |  | 
|  | 1077 | if (l3->alien) { | 
|  | 1078 | struct array_cache *ac = l3->alien[node]; | 
| Christoph Lameter | e00946f | 2006-03-25 03:06:45 -0800 | [diff] [blame] | 1079 |  | 
|  | 1080 | if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 1081 | __drain_alien_cache(cachep, ac, node); | 
|  | 1082 | spin_unlock_irq(&ac->lock); | 
|  | 1083 | } | 
|  | 1084 | } | 
|  | 1085 | } | 
|  | 1086 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1087 | static void drain_alien_cache(struct kmem_cache *cachep, | 
|  | 1088 | struct array_cache **alien) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1089 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1090 | int i = 0; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1091 | struct array_cache *ac; | 
|  | 1092 | unsigned long flags; | 
|  | 1093 |  | 
|  | 1094 | for_each_online_node(i) { | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1095 | ac = alien[i]; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1096 | if (ac) { | 
|  | 1097 | spin_lock_irqsave(&ac->lock, flags); | 
|  | 1098 | __drain_alien_cache(cachep, ac, i); | 
|  | 1099 | spin_unlock_irqrestore(&ac->lock, flags); | 
|  | 1100 | } | 
|  | 1101 | } | 
|  | 1102 | } | 
| Pekka Enberg | 729bd0b | 2006-06-23 02:03:05 -0700 | [diff] [blame] | 1103 |  | 
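|  |  | /* | 
|  |  |  * Free an object that may belong to a remote node.  Returns 0 if the | 
|  |  |  * object is local to this node (the caller then frees it into the local | 
|  |  |  * array cache), or 1 if it was queued into the matching alien cache or | 
|  |  |  * freed directly back to the remote node's lists. | 
|  |  |  */ | 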
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 1104 | static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) | 
| Pekka Enberg | 729bd0b | 2006-06-23 02:03:05 -0700 | [diff] [blame] | 1105 | { | 
|  | 1106 | struct slab *slabp = virt_to_slab(objp); | 
|  | 1107 | int nodeid = slabp->nodeid; | 
|  | 1108 | struct kmem_list3 *l3; | 
|  | 1109 | struct array_cache *alien = NULL; | 
|  | 1110 |  | 
|  | 1111 | /* | 
|  | 1112 | * Make sure we are not freeing an object from another node to the array | 
|  | 1113 | * cache on this cpu. | 
|  | 1114 | */ | 
|  | 1115 | if (likely(slabp->nodeid == numa_node_id())) | 
|  | 1116 | return 0; | 
|  | 1117 |  | 
|  | 1118 | l3 = cachep->nodelists[numa_node_id()]; | 
|  | 1119 | STATS_INC_NODEFREES(cachep); | 
|  | 1120 | if (l3->alien && l3->alien[nodeid]) { | 
|  | 1121 | alien = l3->alien[nodeid]; | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 1122 | spin_lock(&alien->lock); | 
| Pekka Enberg | 729bd0b | 2006-06-23 02:03:05 -0700 | [diff] [blame] | 1123 | if (unlikely(alien->avail == alien->limit)) { | 
|  | 1124 | STATS_INC_ACOVERFLOW(cachep); | 
|  | 1125 | __drain_alien_cache(cachep, alien, nodeid); | 
|  | 1126 | } | 
|  | 1127 | alien->entry[alien->avail++] = objp; | 
|  | 1128 | spin_unlock(&alien->lock); | 
|  | 1129 | } else { | 
|  | 1130 | spin_lock(&(cachep->nodelists[nodeid])->list_lock); | 
|  | 1131 | free_block(cachep, &objp, 1, nodeid); | 
|  | 1132 | spin_unlock(&(cachep->nodelists[nodeid])->list_lock); | 
|  | 1133 | } | 
|  | 1134 | return 1; | 
|  | 1135 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1136 | #endif | 
|  | 1137 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1138 | static int __cpuinit cpuup_callback(struct notifier_block *nfb, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1139 | unsigned long action, void *hcpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | { | 
|  | 1141 | long cpu = (long)hcpu; | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1142 | struct kmem_cache *cachep; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1143 | struct kmem_list3 *l3 = NULL; | 
|  | 1144 | int node = cpu_to_node(cpu); | 
|  | 1145 | int memsize = sizeof(struct kmem_list3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 |  | 
|  | 1147 | switch (action) { | 
|  | 1148 | case CPU_UP_PREPARE: | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1149 | mutex_lock(&cache_chain_mutex); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1150 | /* | 
|  | 1151 | * We need to do this right at the beginning, since the | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1152 | * alloc_arraycache() calls below are going to use this list. | 
|  | 1153 | * kmalloc_node() allows us to add the slab to the right | 
|  | 1154 | * kmem_list3, and not this cpu's kmem_list3. | 
|  | 1155 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1157 | list_for_each_entry(cachep, &cache_chain, next) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1158 | /* | 
|  | 1159 | * Set up the kmem_list3 for this cpu's node before we can | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1160 | * begin anything. Make sure some other cpu on this | 
|  | 1161 | * node has not already allocated it. | 
|  | 1162 | */ | 
|  | 1163 | if (!cachep->nodelists[node]) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1164 | l3 = kmalloc_node(memsize, GFP_KERNEL, node); | 
|  | 1165 | if (!l3) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1166 | goto bad; | 
|  | 1167 | kmem_list3_init(l3); | 
|  | 1168 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1169 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1170 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1171 | /* | 
|  | 1172 | * The l3s don't come and go as CPUs come and | 
|  | 1173 | * go.  cache_chain_mutex is sufficient | 
|  | 1174 | * protection here. | 
|  | 1175 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1176 | cachep->nodelists[node] = l3; | 
|  | 1177 | } | 
|  | 1178 |  | 
|  | 1179 | spin_lock_irq(&cachep->nodelists[node]->list_lock); | 
|  | 1180 | cachep->nodelists[node]->free_limit = | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1181 | (1 + nr_cpus_node(node)) * | 
|  | 1182 | cachep->batchcount + cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1183 | spin_unlock_irq(&cachep->nodelists[node]->list_lock); | 
|  | 1184 | } | 
|  | 1185 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1186 | /* | 
|  | 1187 | * Now we can go ahead with allocating the shared arrays and | 
|  | 1188 | * array caches | 
|  | 1189 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1190 | list_for_each_entry(cachep, &cache_chain, next) { | 
| Tobias Klauser | cd105df | 2006-01-08 01:00:59 -0800 | [diff] [blame] | 1191 | struct array_cache *nc; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1192 | struct array_cache *shared; | 
|  | 1193 | struct array_cache **alien; | 
| Tobias Klauser | cd105df | 2006-01-08 01:00:59 -0800 | [diff] [blame] | 1194 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1195 | nc = alloc_arraycache(node, cachep->limit, | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1196 | cachep->batchcount); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | if (!nc) | 
|  | 1198 | goto bad; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1199 | shared = alloc_arraycache(node, | 
|  | 1200 | cachep->shared * cachep->batchcount, | 
|  | 1201 | 0xbaadf00d); | 
|  | 1202 | if (!shared) | 
|  | 1203 | goto bad; | 
| Linus Torvalds | 7a21ef6 | 2006-02-05 11:26:38 -0800 | [diff] [blame] | 1204 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1205 | alien = alloc_alien_cache(node, cachep->limit); | 
|  | 1206 | if (!alien) | 
|  | 1207 | goto bad; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | cachep->array[cpu] = nc; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1209 | l3 = cachep->nodelists[node]; | 
|  | 1210 | BUG_ON(!l3); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1211 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1212 | spin_lock_irq(&l3->list_lock); | 
|  | 1213 | if (!l3->shared) { | 
|  | 1214 | /* | 
|  | 1215 | * We are serialised from CPU_DEAD or | 
|  | 1216 | * CPU_UP_CANCELLED by the cpucontrol lock | 
|  | 1217 | */ | 
|  | 1218 | l3->shared = shared; | 
|  | 1219 | shared = NULL; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1220 | } | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1221 | #ifdef CONFIG_NUMA | 
|  | 1222 | if (!l3->alien) { | 
|  | 1223 | l3->alien = alien; | 
|  | 1224 | alien = NULL; | 
|  | 1225 | } | 
|  | 1226 | #endif | 
|  | 1227 | spin_unlock_irq(&l3->list_lock); | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1228 | kfree(shared); | 
|  | 1229 | free_alien_cache(alien); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | } | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1231 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1232 | break; | 
|  | 1233 | case CPU_ONLINE: | 
|  | 1234 | start_cpu_timer(cpu); | 
|  | 1235 | break; | 
|  | 1236 | #ifdef CONFIG_HOTPLUG_CPU | 
|  | 1237 | case CPU_DEAD: | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1238 | /* | 
|  | 1239 | * Even if all the cpus of a node are down, we don't free the | 
|  | 1240 | * kmem_list3 of any cache. This is to avoid a race between | 
|  | 1241 | * cpu_down and a kmalloc allocation from another cpu for | 
|  | 1242 | * memory from the node of the cpu going down.  The list3 | 
|  | 1243 | * structure is usually allocated from kmem_cache_create() and | 
|  | 1244 | * gets destroyed at kmem_cache_destroy(). | 
|  | 1245 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1246 | /* fall thru */ | 
|  | 1247 | case CPU_UP_CANCELED: | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1248 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | list_for_each_entry(cachep, &cache_chain, next) { | 
|  | 1250 | struct array_cache *nc; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1251 | struct array_cache *shared; | 
|  | 1252 | struct array_cache **alien; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1253 | cpumask_t mask; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1255 | mask = node_to_cpumask(node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | /* cpu is dead; no one can alloc from it. */ | 
|  | 1257 | nc = cachep->array[cpu]; | 
|  | 1258 | cachep->array[cpu] = NULL; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1259 | l3 = cachep->nodelists[node]; | 
|  | 1260 |  | 
|  | 1261 | if (!l3) | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1262 | goto free_array_cache; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1263 |  | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 1264 | spin_lock_irq(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1265 |  | 
|  | 1266 | /* Free limit for this kmem_list3 */ | 
|  | 1267 | l3->free_limit -= cachep->batchcount; | 
|  | 1268 | if (nc) | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 1269 | free_block(cachep, nc->entry, nc->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1270 |  | 
|  | 1271 | if (!cpus_empty(mask)) { | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 1272 | spin_unlock_irq(&l3->list_lock); | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1273 | goto free_array_cache; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1274 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1275 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1276 | shared = l3->shared; | 
|  | 1277 | if (shared) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1278 | free_block(cachep, l3->shared->entry, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1279 | l3->shared->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1280 | l3->shared = NULL; | 
|  | 1281 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1282 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1283 | alien = l3->alien; | 
|  | 1284 | l3->alien = NULL; | 
|  | 1285 |  | 
|  | 1286 | spin_unlock_irq(&l3->list_lock); | 
|  | 1287 |  | 
|  | 1288 | kfree(shared); | 
|  | 1289 | if (alien) { | 
|  | 1290 | drain_alien_cache(cachep, alien); | 
|  | 1291 | free_alien_cache(alien); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1292 | } | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1293 | free_array_cache: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | kfree(nc); | 
|  | 1295 | } | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1296 | /* | 
|  | 1297 | * In the previous loop, all the objects were freed to | 
|  | 1298 | * the respective cache's slabs; now we can go ahead and | 
|  | 1299 | * shrink each nodelist to its limit. | 
|  | 1300 | */ | 
|  | 1301 | list_for_each_entry(cachep, &cache_chain, next) { | 
|  | 1302 | l3 = cachep->nodelists[node]; | 
|  | 1303 | if (!l3) | 
|  | 1304 | continue; | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1305 | drain_freelist(cachep, l3, l3->free_objects); | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1306 | } | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1307 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | break; | 
|  | 1309 | #endif | 
|  | 1310 | } | 
|  | 1311 | return NOTIFY_OK; | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1312 | bad: | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1313 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | return NOTIFY_BAD; | 
|  | 1315 | } | 
|  | 1316 |  | 
| Chandra Seetharaman | 74b85f3 | 2006-06-27 02:54:09 -0700 | [diff] [blame] | 1317 | static struct notifier_block __cpuinitdata cpucache_notifier = { | 
|  | 1318 | &cpuup_callback, NULL, 0 | 
|  | 1319 | }; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1321 | /* | 
|  | 1322 | * swap the static kmem_list3 with kmalloced memory | 
|  | 1323 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1324 | static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, | 
|  | 1325 | int nodeid) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1326 | { | 
|  | 1327 | struct kmem_list3 *ptr; | 
|  | 1328 |  | 
|  | 1329 | BUG_ON(cachep->nodelists[nodeid] != list); | 
|  | 1330 | ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); | 
|  | 1331 | BUG_ON(!ptr); | 
|  | 1332 |  | 
|  | 1333 | local_irq_disable(); | 
|  | 1334 | memcpy(ptr, list, sizeof(struct kmem_list3)); | 
| Ingo Molnar | 2b2d549 | 2006-07-03 00:25:28 -0700 | [diff] [blame] | 1335 | /* | 
|  | 1336 | * Do not assume that spinlocks can be initialized via memcpy: | 
|  | 1337 | */ | 
|  | 1338 | spin_lock_init(&ptr->list_lock); | 
|  | 1339 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1340 | MAKE_ALL_LISTS(cachep, ptr, nodeid); | 
|  | 1341 | cachep->nodelists[nodeid] = ptr; | 
|  | 1342 | local_irq_enable(); | 
|  | 1343 | } | 
|  | 1344 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1345 | /* | 
|  | 1346 | * Initialisation.  Called after the page allocator has been initialised and | 
|  | 1347 | * before smp_init(). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | */ | 
|  | 1349 | void __init kmem_cache_init(void) | 
|  | 1350 | { | 
|  | 1351 | size_t left_over; | 
|  | 1352 | struct cache_sizes *sizes; | 
|  | 1353 | struct cache_names *names; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1354 | int i; | 
| Jack Steiner | 07ed76b | 2006-03-07 21:55:46 -0800 | [diff] [blame] | 1355 | int order; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1356 |  | 
|  | 1357 | for (i = 0; i < NUM_INIT_LISTS; i++) { | 
|  | 1358 | kmem_list3_init(&initkmem_list3[i]); | 
|  | 1359 | if (i < MAX_NUMNODES) | 
|  | 1360 | cache_cache.nodelists[i] = NULL; | 
|  | 1361 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 |  | 
|  | 1363 | /* | 
|  | 1364 | * Fragmentation resistance on low memory - only use bigger | 
|  | 1365 | * page orders on machines with more than 32MB of memory. | 
|  | 1366 | */ | 
|  | 1367 | if (num_physpages > (32 << 20) >> PAGE_SHIFT) | 
|  | 1368 | slab_break_gfp_order = BREAK_GFP_ORDER_HI; | 
|  | 1369 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | /* Bootstrap is tricky, because several objects are allocated | 
|  | 1371 | * from caches that do not exist yet: | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1372 | * 1) initialize the cache_cache cache: it contains the struct | 
|  | 1373 | *    kmem_cache structures of all caches, except cache_cache itself: | 
|  | 1374 | *    cache_cache is statically allocated. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1375 | *    Initially an __init data area is used for the head array and the | 
|  | 1376 | *    kmem_list3 structures, it's replaced with a kmalloc allocated | 
|  | 1377 | *    array at the end of the bootstrap. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | * 2) Create the first kmalloc cache. | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1379 | *    The struct kmem_cache for the new cache is allocated normally. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1380 | *    An __init data area is used for the head array. | 
|  | 1381 | * 3) Create the remaining kmalloc caches, with minimally sized | 
|  | 1382 | *    head arrays. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1383 | * 4) Replace the __init data head arrays for cache_cache and the first | 
|  | 1384 | *    kmalloc cache with kmalloc allocated arrays. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1385 | * 5) Replace the __init data for kmem_list3 for cache_cache and | 
|  | 1386 | *    the other caches with kmalloc allocated memory. | 
|  | 1387 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | */ | 
|  | 1389 |  | 
|  | 1390 | /* 1) create the cache_cache */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | INIT_LIST_HEAD(&cache_chain); | 
|  | 1392 | list_add(&cache_cache.next, &cache_chain); | 
|  | 1393 | cache_cache.colour_off = cache_line_size(); | 
|  | 1394 | cache_cache.array[smp_processor_id()] = &initarray_cache.cache; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1395 | cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1397 | cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, | 
|  | 1398 | cache_line_size()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 |  | 
| Jack Steiner | 07ed76b | 2006-03-07 21:55:46 -0800 | [diff] [blame] | 1400 | for (order = 0; order < MAX_ORDER; order++) { | 
|  | 1401 | cache_estimate(order, cache_cache.buffer_size, | 
|  | 1402 | cache_line_size(), 0, &left_over, &cache_cache.num); | 
|  | 1403 | if (cache_cache.num) | 
|  | 1404 | break; | 
|  | 1405 | } | 
| Eric Sesterhenn | 40094fa | 2006-04-02 13:49:25 +0200 | [diff] [blame] | 1406 | BUG_ON(!cache_cache.num); | 
| Jack Steiner | 07ed76b | 2006-03-07 21:55:46 -0800 | [diff] [blame] | 1407 | cache_cache.gfporder = order; | 
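|  |  | /* colour: how many distinct slab colour offsets fit in the leftover space */ | 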
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1408 | cache_cache.colour = left_over / cache_cache.colour_off; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1409 | cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + | 
|  | 1410 | sizeof(struct slab), cache_line_size()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 |  | 
|  | 1412 | /* 2+3) create the kmalloc caches */ | 
|  | 1413 | sizes = malloc_sizes; | 
|  | 1414 | names = cache_names; | 
|  | 1415 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1416 | /* | 
|  | 1417 | * Initialize the caches that provide memory for the array cache and the | 
|  | 1418 | * kmem_list3 structures first.  Without this, further allocations will | 
|  | 1419 | * bug. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1420 | */ | 
|  | 1421 |  | 
|  | 1422 | sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1423 | sizes[INDEX_AC].cs_size, | 
|  | 1424 | ARCH_KMALLOC_MINALIGN, | 
|  | 1425 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, | 
|  | 1426 | NULL, NULL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1427 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1428 | if (INDEX_AC != INDEX_L3) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1429 | sizes[INDEX_L3].cs_cachep = | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1430 | kmem_cache_create(names[INDEX_L3].name, | 
|  | 1431 | sizes[INDEX_L3].cs_size, | 
|  | 1432 | ARCH_KMALLOC_MINALIGN, | 
|  | 1433 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, | 
|  | 1434 | NULL, NULL); | 
|  | 1435 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1436 |  | 
| Ingo Molnar | e0a4272 | 2006-06-23 02:03:46 -0700 | [diff] [blame] | 1437 | slab_early_init = 0; | 
|  | 1438 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1439 | while (sizes->cs_size != ULONG_MAX) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1440 | /* | 
|  | 1441 | * For performance, all the general caches are L1 aligned. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | * This should be particularly beneficial on SMP boxes, as it | 
|  | 1443 | * eliminates "false sharing". | 
|  | 1444 | * Note: for systems short on memory, removing the alignment will | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1445 | * allow tighter packing of the smaller caches. | 
|  | 1446 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1447 | if (!sizes->cs_cachep) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1448 | sizes->cs_cachep = kmem_cache_create(names->name, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1449 | sizes->cs_size, | 
|  | 1450 | ARCH_KMALLOC_MINALIGN, | 
|  | 1451 | ARCH_KMALLOC_FLAGS|SLAB_PANIC, | 
|  | 1452 | NULL, NULL); | 
|  | 1453 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1455 | sizes->cs_dmacachep = kmem_cache_create(names->name_dma, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1456 | sizes->cs_size, | 
|  | 1457 | ARCH_KMALLOC_MINALIGN, | 
|  | 1458 | ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| | 
|  | 1459 | SLAB_PANIC, | 
|  | 1460 | NULL, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | sizes++; | 
|  | 1462 | names++; | 
|  | 1463 | } | 
|  | 1464 | /* 4) Replace the bootstrap head arrays */ | 
|  | 1465 | { | 
| Ingo Molnar | 2b2d549 | 2006-07-03 00:25:28 -0700 | [diff] [blame] | 1466 | struct array_cache *ptr; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1467 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1469 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | local_irq_disable(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1471 | BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); | 
|  | 1472 | memcpy(ptr, cpu_cache_get(&cache_cache), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1473 | sizeof(struct arraycache_init)); | 
| Ingo Molnar | 2b2d549 | 2006-07-03 00:25:28 -0700 | [diff] [blame] | 1474 | /* | 
|  | 1475 | * Do not assume that spinlocks can be initialized via memcpy: | 
|  | 1476 | */ | 
|  | 1477 | spin_lock_init(&ptr->lock); | 
|  | 1478 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | cache_cache.array[smp_processor_id()] = ptr; | 
|  | 1480 | local_irq_enable(); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1481 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1483 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 | local_irq_disable(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1485 | BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1486 | != &initarray_generic.cache); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1487 | memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1488 | sizeof(struct arraycache_init)); | 
| Ingo Molnar | 2b2d549 | 2006-07-03 00:25:28 -0700 | [diff] [blame] | 1489 | /* | 
|  | 1490 | * Do not assume that spinlocks can be initialized via memcpy: | 
|  | 1491 | */ | 
|  | 1492 | spin_lock_init(&ptr->lock); | 
|  | 1493 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1494 | malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1495 | ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1496 | local_irq_enable(); | 
|  | 1497 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1498 | /* 5) Replace the bootstrap kmem_list3's */ | 
|  | 1499 | { | 
|  | 1500 | int node; | 
|  | 1501 | /* Replace the static kmem_list3 structures for the boot cpu */ | 
|  | 1502 | init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1503 | numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1505 | for_each_online_node(node) { | 
|  | 1506 | init_list(malloc_sizes[INDEX_AC].cs_cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1507 | &initkmem_list3[SIZE_AC + node], node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1508 |  | 
|  | 1509 | if (INDEX_AC != INDEX_L3) { | 
|  | 1510 | init_list(malloc_sizes[INDEX_L3].cs_cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1511 | &initkmem_list3[SIZE_L3 + node], | 
|  | 1512 | node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1513 | } | 
|  | 1514 | } | 
|  | 1515 | } | 
|  | 1516 |  | 
|  | 1517 | /* 6) resize the head arrays to their final sizes */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1519 | struct kmem_cache *cachep; | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1520 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1521 | list_for_each_entry(cachep, &cache_chain, next) | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 1522 | if (enable_cpucache(cachep)) | 
|  | 1523 | BUG(); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1524 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1525 | } | 
|  | 1526 |  | 
| Ravikiran G Thirumalai | 056c624 | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 1527 | /* Annotate slab for lockdep -- annotate the malloc caches */ | 
|  | 1528 | init_lock_keys(); | 
|  | 1529 |  | 
|  | 1530 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | /* Done! */ | 
|  | 1532 | g_cpucache_up = FULL; | 
|  | 1533 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1534 | /* | 
|  | 1535 | * Register a cpu startup notifier callback that initializes | 
|  | 1536 | * the per-cpu head arrays (cpu_cache_get()) for all new cpus. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1537 | */ | 
|  | 1538 | register_cpu_notifier(&cpucache_notifier); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1540 | /* | 
|  | 1541 | * The reap timers are started later, with a module init call: that part | 
|  | 1542 | * of the kernel is not yet operational. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | */ | 
|  | 1544 | } | 
|  | 1545 |  | 
|  | 1546 | static int __init cpucache_init(void) | 
|  | 1547 | { | 
|  | 1548 | int cpu; | 
|  | 1549 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1550 | /* | 
|  | 1551 | * Register the timers that return unneeded pages to the page allocator | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1553 | for_each_online_cpu(cpu) | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1554 | start_cpu_timer(cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 | return 0; | 
|  | 1556 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | __initcall(cpucache_init); | 
|  | 1558 |  | 
|  | 1559 | /* | 
|  | 1560 | * Interface to system's page allocator. No need to hold the cache-lock. | 
|  | 1561 | * | 
|  | 1562 | * If we requested dmaable memory, we will get it. Even if we | 
|  | 1563 | * did not request dmaable memory, we might get it, but that | 
|  | 1564 | * would be relatively rare and ignorable. | 
|  | 1565 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1566 | static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1567 | { | 
|  | 1568 | struct page *page; | 
| Christoph Hellwig | e1b6aa6 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 1569 | int nr_pages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1570 | int i; | 
|  | 1571 |  | 
| Luke Yang | d6fef9d | 2006-04-10 22:52:56 -0700 | [diff] [blame] | 1572 | #ifndef CONFIG_MMU | 
| Christoph Hellwig | e1b6aa6 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 1573 | /* | 
|  | 1574 | * Nommu uses slabs for process anonymous memory allocations, and thus | 
|  | 1575 | * requires __GFP_COMP to properly refcount higher order allocations | 
| Luke Yang | d6fef9d | 2006-04-10 22:52:56 -0700 | [diff] [blame] | 1576 | */ | 
| Christoph Hellwig | e1b6aa6 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 1577 | flags |= __GFP_COMP; | 
| Luke Yang | d6fef9d | 2006-04-10 22:52:56 -0700 | [diff] [blame] | 1578 | #endif | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 1579 |  | 
|  | 1580 | /* | 
|  | 1581 | * Under NUMA we want memory on the indicated node. We will handle | 
|  | 1582 | * the needed fallback ourselves since we want to serve from our | 
|  | 1583 | * per node object lists first for other nodes. | 
|  | 1584 | */ | 
|  | 1585 | flags |= cachep->gfpflags | GFP_THISNODE; | 
| Christoph Hellwig | e1b6aa6 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 1586 |  | 
|  | 1587 | page = alloc_pages_node(nodeid, flags, cachep->gfporder); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | if (!page) | 
|  | 1589 | return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 |  | 
| Christoph Hellwig | e1b6aa6 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 1591 | nr_pages = (1 << cachep->gfporder); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1593 | add_zone_page_state(page_zone(page), | 
|  | 1594 | NR_SLAB_RECLAIMABLE, nr_pages); | 
|  | 1595 | else | 
|  | 1596 | add_zone_page_state(page_zone(page), | 
|  | 1597 | NR_SLAB_UNRECLAIMABLE, nr_pages); | 
| Christoph Hellwig | e1b6aa6 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 1598 | for (i = 0; i < nr_pages; i++) | 
|  | 1599 | __SetPageSlab(page + i); | 
|  | 1600 | return page_address(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | } | 
|  | 1602 |  | 
|  | 1603 | /* | 
|  | 1604 | * Interface to system's page release. | 
|  | 1605 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1606 | static void kmem_freepages(struct kmem_cache *cachep, void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1607 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1608 | unsigned long i = (1 << cachep->gfporder); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | struct page *page = virt_to_page(addr); | 
|  | 1610 | const unsigned long nr_freed = i; | 
|  | 1611 |  | 
| Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1612 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 
|  | 1613 | sub_zone_page_state(page_zone(page), | 
|  | 1614 | NR_SLAB_RECLAIMABLE, nr_freed); | 
|  | 1615 | else | 
|  | 1616 | sub_zone_page_state(page_zone(page), | 
|  | 1617 | NR_SLAB_UNRECLAIMABLE, nr_freed); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | while (i--) { | 
| Nick Piggin | f205b2f | 2006-03-22 00:08:02 -0800 | [diff] [blame] | 1619 | BUG_ON(!PageSlab(page)); | 
|  | 1620 | __ClearPageSlab(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | page++; | 
|  | 1622 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | if (current->reclaim_state) | 
|  | 1624 | current->reclaim_state->reclaimed_slab += nr_freed; | 
|  | 1625 | free_pages((unsigned long)addr, cachep->gfporder); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | } | 
|  | 1627 |  | 
|  | 1628 | static void kmem_rcu_free(struct rcu_head *head) | 
|  | 1629 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1630 | struct slab_rcu *slab_rcu = (struct slab_rcu *)head; | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1631 | struct kmem_cache *cachep = slab_rcu->cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 |  | 
|  | 1633 | kmem_freepages(cachep, slab_rcu->addr); | 
|  | 1634 | if (OFF_SLAB(cachep)) | 
|  | 1635 | kmem_cache_free(cachep->slabp_cache, slab_rcu); | 
|  | 1636 | } | 
|  | 1637 |  | 
|  | 1638 | #if DEBUG | 
|  | 1639 |  | 
|  | 1640 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1641 | static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1642 | unsigned long caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1643 | { | 
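|  |  | /* | 
|  |  |  * Record layout written into the object: a 0x12345678 marker, the | 
|  |  |  * caller address, the cpu id, then as many stack words that look | 
|  |  |  * like kernel text addresses as fit, terminated by 0x87654321. | 
|  |  |  */ | 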
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1644 | int size = obj_size(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1645 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1646 | addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1647 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1648 | if (size < 5 * sizeof(unsigned long)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | return; | 
|  | 1650 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1651 | *addr++ = 0x12345678; | 
|  | 1652 | *addr++ = caller; | 
|  | 1653 | *addr++ = smp_processor_id(); | 
|  | 1654 | size -= 3 * sizeof(unsigned long); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | { | 
|  | 1656 | unsigned long *sptr = &caller; | 
|  | 1657 | unsigned long svalue; | 
|  | 1658 |  | 
|  | 1659 | while (!kstack_end(sptr)) { | 
|  | 1660 | svalue = *sptr++; | 
|  | 1661 | if (kernel_text_address(svalue)) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1662 | *addr++ = svalue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | size -= sizeof(unsigned long); | 
|  | 1664 | if (size <= sizeof(unsigned long)) | 
|  | 1665 | break; | 
|  | 1666 | } | 
|  | 1667 | } | 
|  | 1668 |  | 
|  | 1669 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1670 | *addr++ = 0x87654321; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | } | 
|  | 1672 | #endif | 
|  | 1673 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1674 | static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1676 | int size = obj_size(cachep); | 
|  | 1677 | addr = &((char *)addr)[obj_offset(cachep)]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 |  | 
|  | 1679 | memset(addr, val, size); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1680 | *(unsigned char *)(addr + size - 1) = POISON_END; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | } | 
|  | 1682 |  | 
|  | 1683 | static void dump_line(char *data, int offset, int limit) | 
|  | 1684 | { | 
|  | 1685 | int i; | 
|  | 1686 | printk(KERN_ERR "%03x:", offset); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1687 | for (i = 0; i < limit; i++) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1688 | printk(" %02x", (unsigned char)data[offset + i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1689 | printk("\n"); | 
|  | 1690 | } | 
|  | 1691 | #endif | 
|  | 1692 |  | 
|  | 1693 | #if DEBUG | 
|  | 1694 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1695 | static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1696 | { | 
|  | 1697 | int i, size; | 
|  | 1698 | char *realobj; | 
|  | 1699 |  | 
|  | 1700 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 1701 | printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1702 | *dbg_redzone1(cachep, objp), | 
|  | 1703 | *dbg_redzone2(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1704 | } | 
|  | 1705 |  | 
|  | 1706 | if (cachep->flags & SLAB_STORE_USER) { | 
|  | 1707 | printk(KERN_ERR "Last user: [<%p>]", | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1708 | *dbg_userword(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | print_symbol("(%s)", | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1710 | (unsigned long)*dbg_userword(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | printk("\n"); | 
|  | 1712 | } | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1713 | realobj = (char *)objp + obj_offset(cachep); | 
|  | 1714 | size = obj_size(cachep); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1715 | for (i = 0; i < size && lines; i += 16, lines--) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | int limit; | 
|  | 1717 | limit = 16; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1718 | if (i + limit > size) | 
|  | 1719 | limit = size - i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | dump_line(realobj, i, limit); | 
|  | 1721 | } | 
|  | 1722 | } | 
|  | 1723 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1724 | static void check_poison_obj(struct kmem_cache *cachep, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | { | 
|  | 1726 | char *realobj; | 
|  | 1727 | int size, i; | 
|  | 1728 | int lines = 0; | 
|  | 1729 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1730 | realobj = (char *)objp + obj_offset(cachep); | 
|  | 1731 | size = obj_size(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1733 | for (i = 0; i < size; i++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1734 | char exp = POISON_FREE; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1735 | if (i == size - 1) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1736 | exp = POISON_END; | 
|  | 1737 | if (realobj[i] != exp) { | 
|  | 1738 | int limit; | 
|  | 1739 | /* Mismatch ! */ | 
|  | 1740 | /* Print header */ | 
|  | 1741 | if (lines == 0) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1742 | printk(KERN_ERR | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1743 | "Slab corruption: start=%p, len=%d\n", | 
|  | 1744 | realobj, size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | print_objinfo(cachep, objp, 0); | 
|  | 1746 | } | 
|  | 1747 | /* Hexdump the affected line */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1748 | i = (i / 16) * 16; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1749 | limit = 16; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1750 | if (i + limit > size) | 
|  | 1751 | limit = size - i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1752 | dump_line(realobj, i, limit); | 
|  | 1753 | i += 16; | 
|  | 1754 | lines++; | 
|  | 1755 | /* Limit to 5 lines */ | 
|  | 1756 | if (lines > 5) | 
|  | 1757 | break; | 
|  | 1758 | } | 
|  | 1759 | } | 
|  | 1760 | if (lines != 0) { | 
|  | 1761 | /* Print some data about the neighboring objects, if they | 
|  | 1762 | * exist: | 
|  | 1763 | */ | 
| Pekka Enberg | 6ed5eb2 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1764 | struct slab *slabp = virt_to_slab(objp); | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 1765 | unsigned int objnr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 |  | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 1767 | objnr = obj_to_index(cachep, slabp, objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | if (objnr) { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 1769 | objp = index_to_obj(cachep, slabp, objnr - 1); | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1770 | realobj = (char *)objp + obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1771 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1772 | realobj, size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 | print_objinfo(cachep, objp, 2); | 
|  | 1774 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1775 | if (objnr + 1 < cachep->num) { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 1776 | objp = index_to_obj(cachep, slabp, objnr + 1); | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1777 | realobj = (char *)objp + obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1778 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1779 | realobj, size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1780 | print_objinfo(cachep, objp, 2); | 
|  | 1781 | } | 
|  | 1782 | } | 
|  | 1783 | } | 
|  | 1784 | #endif | 
|  | 1785 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 | #if DEBUG | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1787 | /** | 
| Randy Dunlap | 911851e | 2006-03-22 00:08:14 -0800 | [diff] [blame] | 1788 | * slab_destroy_objs - run destructors and debug checks on a slab's objects | 
|  | 1789 | * @cachep: cache pointer being destroyed | 
|  | 1790 | * @slabp: slab pointer being destroyed | 
|  | 1791 | * | 
|  | 1792 | * Call the registered destructor for each object in a slab that is being | 
|  | 1793 | * destroyed. | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1794 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1795 | static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1796 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1797 | int i; | 
|  | 1798 | for (i = 0; i < cachep->num; i++) { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 1799 | void *objp = index_to_obj(cachep, slabp, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 |  | 
|  | 1801 | if (cachep->flags & SLAB_POISON) { | 
|  | 1802 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1803 | if (cachep->buffer_size % PAGE_SIZE == 0 && | 
|  | 1804 | OFF_SLAB(cachep)) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1805 | kernel_map_pages(virt_to_page(objp), | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1806 | cachep->buffer_size / PAGE_SIZE, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1807 | else | 
|  | 1808 | check_poison_obj(cachep, objp); | 
|  | 1809 | #else | 
|  | 1810 | check_poison_obj(cachep, objp); | 
|  | 1811 | #endif | 
|  | 1812 | } | 
|  | 1813 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 1814 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | 
|  | 1815 | slab_error(cachep, "start of a freed object " | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1816 | "was overwritten"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 
|  | 1818 | slab_error(cachep, "end of a freed object " | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1819 | "was overwritten"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1820 | } | 
|  | 1821 | if (cachep->dtor && !(cachep->flags & SLAB_POISON)) | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1822 | (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1823 | } | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1824 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1825 | #else | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1826 | static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1827 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1828 | if (cachep->dtor) { | 
|  | 1829 | int i; | 
|  | 1830 | for (i = 0; i < cachep->num; i++) { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 1831 | void *objp = index_to_obj(cachep, slabp, i); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1832 | (cachep->dtor) (objp, cachep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1833 | } | 
|  | 1834 | } | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1835 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | #endif | 
|  | 1837 |  | 
| Randy Dunlap | 911851e | 2006-03-22 00:08:14 -0800 | [diff] [blame] | 1838 | /** | 
|  | 1839 | * slab_destroy - destroy and release all objects in a slab | 
|  | 1840 | * @cachep: cache pointer being destroyed | 
|  | 1841 | * @slabp: slab pointer being destroyed | 
|  | 1842 | * | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1843 | * Destroy all the objs in a slab, and release the mem back to the system. | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1844 | * Before calling, the slab must have been unlinked from the cache.  The | 
|  | 1845 | * cache-lock is not held/needed. | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1846 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1847 | static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1848 | { | 
|  | 1849 | void *addr = slabp->s_mem - slabp->colouroff; | 
|  | 1850 |  | 
|  | 1851 | slab_destroy_objs(cachep, slabp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1852 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { | 
|  | 1853 | struct slab_rcu *slab_rcu; | 
|  | 1854 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1855 | slab_rcu = (struct slab_rcu *)slabp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1856 | slab_rcu->cachep = cachep; | 
|  | 1857 | slab_rcu->addr = addr; | 
|  | 1858 | call_rcu(&slab_rcu->head, kmem_rcu_free); | 
|  | 1859 | } else { | 
|  | 1860 | kmem_freepages(cachep, addr); | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 1861 | if (OFF_SLAB(cachep)) | 
|  | 1862 | kmem_cache_free(cachep->slabp_cache, slabp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | } | 
|  | 1864 | } | 
|  | 1865 |  | 
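/*
 * Illustrative sketch (not part of slab.c): the reader-side contract that
 * makes the call_rcu() deferral in slab_destroy() useful for
 * SLAB_DESTROY_BY_RCU caches.  RCU only guarantees that the memory stays a
 * valid, initialized object of this cache until the grace period ends; the
 * object may already have been freed and handed out again, so a lock-free
 * reader must re-validate it after taking a lock inside it.  "struct thing",
 * thing_ptr and the key field are hypothetical names for this example.
 */
struct thing {
	spinlock_t lock;
	int key;
};

static struct thing *thing_ptr;		/* published with rcu_assign_pointer() */

static struct thing *thing_get_locked(int key)
{
	struct thing *t;

	rcu_read_lock();
	t = rcu_dereference(thing_ptr);
	if (t) {
		spin_lock(&t->lock);
		if (t->key != key) {	/* object was recycled meanwhile */
			spin_unlock(&t->lock);
			t = NULL;
		}
	}
	rcu_read_unlock();
	return t;			/* locked on success, NULL otherwise */
}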
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1866 | /* | 
|  | 1867 | * For setting up all the kmem_list3s for a cache whose buffer_size is the | 
|  | 1868 | * same as the size of kmem_list3. | 
|  | 1869 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1870 | static void set_up_list3s(struct kmem_cache *cachep, int index) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1871 | { | 
|  | 1872 | int node; | 
|  | 1873 |  | 
|  | 1874 | for_each_online_node(node) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1875 | cachep->nodelists[node] = &initkmem_list3[index + node]; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1876 | cachep->nodelists[node]->next_reap = jiffies + | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1877 | REAPTIMEOUT_LIST3 + | 
|  | 1878 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1879 | } | 
|  | 1880 | } | 
|  | 1881 |  | 
| Christoph Lameter | 117f6eb | 2006-09-25 23:31:37 -0700 | [diff] [blame] | 1882 | static void __kmem_cache_destroy(struct kmem_cache *cachep) | 
|  | 1883 | { | 
|  | 1884 | int i; | 
|  | 1885 | struct kmem_list3 *l3; | 
|  | 1886 |  | 
|  | 1887 | for_each_online_cpu(i) | 
|  | 1888 | kfree(cachep->array[i]); | 
|  | 1889 |  | 
|  | 1890 | /* NUMA: free the list3 structures */ | 
|  | 1891 | for_each_online_node(i) { | 
|  | 1892 | l3 = cachep->nodelists[i]; | 
|  | 1893 | if (l3) { | 
|  | 1894 | kfree(l3->shared); | 
|  | 1895 | free_alien_cache(l3->alien); | 
|  | 1896 | kfree(l3); | 
|  | 1897 | } | 
|  | 1898 | } | 
|  | 1899 | kmem_cache_free(&cache_cache, cachep); | 
|  | 1900 | } | 
|  | 1901 |  | 
|  | 1902 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1903 | /** | 
| Randy.Dunlap | a70773d | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 1904 | * calculate_slab_order - calculate size (page order) of slabs | 
|  | 1905 | * @cachep: pointer to the cache that is being created | 
|  | 1906 | * @size: size of objects to be created in this cache. | 
|  | 1907 | * @align: required alignment for the objects. | 
|  | 1908 | * @flags: slab allocation flags | 
|  | 1909 | * | 
|  | 1910 | * Also calculates the number of objects per slab. | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1911 | * | 
|  | 1912 | * This could be made much more intelligent.  For now, try to avoid using | 
|  | 1913 | * high order pages for slabs.  When the gfp() functions are more friendly | 
|  | 1914 | * towards high-order requests, this should be changed. | 
|  | 1915 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1916 | static size_t calculate_slab_order(struct kmem_cache *cachep, | 
| Randy Dunlap | ee13d78 | 2006-02-01 03:05:53 -0800 | [diff] [blame] | 1917 | size_t size, size_t align, unsigned long flags) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1918 | { | 
| Ingo Molnar | b1ab41c | 2006-06-02 15:44:58 +0200 | [diff] [blame] | 1919 | unsigned long offslab_limit; | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1920 | size_t left_over = 0; | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1921 | int gfporder; | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1922 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1923 | for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1924 | unsigned int num; | 
|  | 1925 | size_t remainder; | 
|  | 1926 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1927 | cache_estimate(gfporder, size, align, flags, &remainder, &num); | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1928 | if (!num) | 
|  | 1929 | continue; | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1930 |  | 
| Ingo Molnar | b1ab41c | 2006-06-02 15:44:58 +0200 | [diff] [blame] | 1931 | if (flags & CFLGS_OFF_SLAB) { | 
|  | 1932 | /* | 
|  | 1933 | * Max number of objs-per-slab for caches which | 
|  | 1934 | * use off-slab slabs. Needed to avoid a possible | 
|  | 1935 | * looping condition in cache_grow(). | 
|  | 1936 | */ | 
|  | 1937 | offslab_limit = size - sizeof(struct slab); | 
|  | 1938 | offslab_limit /= sizeof(kmem_bufctl_t); | 
|  | 1939 |  | 
|  | 1940 | if (num > offslab_limit) | 
|  | 1941 | break; | 
|  | 1942 | } | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1943 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1944 | /* Found something acceptable - save it away */ | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1945 | cachep->num = num; | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1946 | cachep->gfporder = gfporder; | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1947 | left_over = remainder; | 
|  | 1948 |  | 
|  | 1949 | /* | 
| Linus Torvalds | f78bb8a | 2006-03-08 10:33:05 -0800 | [diff] [blame] | 1950 | * A VFS-reclaimable slab tends to have most allocations | 
|  | 1951 | * as GFP_NOFS and we really don't want to have to be allocating | 
|  | 1952 | * higher-order pages when we are unable to shrink dcache. | 
|  | 1953 | */ | 
|  | 1954 | if (flags & SLAB_RECLAIM_ACCOUNT) | 
|  | 1955 | break; | 
|  | 1956 |  | 
|  | 1957 | /* | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1958 | * A large number of objects is good, but very large slabs are | 
|  | 1959 | * currently bad for the gfp()s. | 
|  | 1960 | */ | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1961 | if (gfporder >= slab_break_gfp_order) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1962 | break; | 
|  | 1963 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1964 | /* | 
|  | 1965 | * Acceptable internal fragmentation? | 
|  | 1966 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1967 | if (left_over * 8 <= (PAGE_SIZE << gfporder)) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1968 | break; | 
|  | 1969 | } | 
|  | 1970 | return left_over; | 
|  | 1971 | } | 
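/*
 * Worked example of the loop above (illustrative; assumes 4096-byte pages,
 * an off-slab descriptor so management overhead can be ignored, and the
 * default slab_break_gfp_order): for 1600-byte objects, gfporder 0 yields
 * num = 2 with 896 bytes left over; 896 * 8 > 4096, so the loop keeps
 * searching.  gfporder 1 yields num = 5 with 192 bytes left over, which
 * satisfies the checks, so the cache ends up with 5 objects per order-1
 * slab and 192 bytes available for colouring.
 */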
|  | 1972 |  | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 1973 | static int setup_cpu_cache(struct kmem_cache *cachep) | 
| Pekka Enberg | f30cf7d | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1974 | { | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 1975 | if (g_cpucache_up == FULL) | 
|  | 1976 | return enable_cpucache(cachep); | 
|  | 1977 |  | 
| Pekka Enberg | f30cf7d | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 1978 | if (g_cpucache_up == NONE) { | 
|  | 1979 | /* | 
|  | 1980 | * Note: the first kmem_cache_create must create the cache | 
|  | 1981 | * that's used by kmalloc(24), otherwise the creation of | 
|  | 1982 | * further caches will BUG(). | 
|  | 1983 | */ | 
|  | 1984 | cachep->array[smp_processor_id()] = &initarray_generic.cache; | 
|  | 1985 |  | 
|  | 1986 | /* | 
|  | 1987 | * If the cache that's used by kmalloc(sizeof(kmem_list3)) is | 
|  | 1988 | * the first cache, then we need to set up all its list3s, | 
|  | 1989 | * otherwise the creation of further caches will BUG(). | 
|  | 1990 | */ | 
|  | 1991 | set_up_list3s(cachep, SIZE_AC); | 
|  | 1992 | if (INDEX_AC == INDEX_L3) | 
|  | 1993 | g_cpucache_up = PARTIAL_L3; | 
|  | 1994 | else | 
|  | 1995 | g_cpucache_up = PARTIAL_AC; | 
|  | 1996 | } else { | 
|  | 1997 | cachep->array[smp_processor_id()] = | 
|  | 1998 | kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 
|  | 1999 |  | 
|  | 2000 | if (g_cpucache_up == PARTIAL_AC) { | 
|  | 2001 | set_up_list3s(cachep, SIZE_L3); | 
|  | 2002 | g_cpucache_up = PARTIAL_L3; | 
|  | 2003 | } else { | 
|  | 2004 | int node; | 
|  | 2005 | for_each_online_node(node) { | 
|  | 2006 | cachep->nodelists[node] = | 
|  | 2007 | kmalloc_node(sizeof(struct kmem_list3), | 
|  | 2008 | GFP_KERNEL, node); | 
|  | 2009 | BUG_ON(!cachep->nodelists[node]); | 
|  | 2010 | kmem_list3_init(cachep->nodelists[node]); | 
|  | 2011 | } | 
|  | 2012 | } | 
|  | 2013 | } | 
|  | 2014 | cachep->nodelists[numa_node_id()]->next_reap = | 
|  | 2015 | jiffies + REAPTIMEOUT_LIST3 + | 
|  | 2016 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
|  | 2017 |  | 
|  | 2018 | cpu_cache_get(cachep)->avail = 0; | 
|  | 2019 | cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; | 
|  | 2020 | cpu_cache_get(cachep)->batchcount = 1; | 
|  | 2021 | cpu_cache_get(cachep)->touched = 0; | 
|  | 2022 | cachep->batchcount = 1; | 
|  | 2023 | cachep->limit = BOOT_CPUCACHE_ENTRIES; | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 2024 | return 0; | 
| Pekka Enberg | f30cf7d | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2025 | } | 
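/*
 * Summary of the bootstrap handled above (a reading aid, not new logic):
 * g_cpucache_up roughly moves NONE -> PARTIAL_AC (the arraycache_init
 * kmalloc cache exists) -> PARTIAL_L3 (the kmem_list3 kmalloc cache also
 * exists) -> FULL (set later in initialization, once kmalloc is fully
 * usable).  Until then the static initarray_generic and initkmem_list3
 * structures stand in for the dynamically allocated per-cpu arrays and
 * per-node lists.
 */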
|  | 2026 |  | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 2027 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | * kmem_cache_create - Create a cache. | 
|  | 2029 | * @name: A string which is used in /proc/slabinfo to identify this cache. | 
|  | 2030 | * @size: The size of objects to be created in this cache. | 
|  | 2031 | * @align: The required alignment for the objects. | 
|  | 2032 | * @flags: SLAB flags | 
|  | 2033 | * @ctor: A constructor for the objects. | 
|  | 2034 | * @dtor: A destructor for the objects. | 
|  | 2035 | * | 
|  | 2036 | * Returns a ptr to the cache on success, NULL on failure. | 
|  | 2037 | * Cannot be called within an interrupt, but can be interrupted. | 
|  | 2038 | * The @ctor is run when new pages are allocated by the cache | 
|  | 2039 | * and the @dtor is run before the pages are handed back. | 
|  | 2040 | * | 
|  | 2041 | * @name must be valid until the cache is destroyed. This implies that | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2042 | * the module calling this has to destroy the cache before getting unloaded. | 
|  | 2043 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2044 | * The flags are | 
|  | 2045 | * | 
|  | 2046 | * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) | 
|  | 2047 | * to catch references to uninitialised memory. | 
|  | 2048 | * | 
|  | 2049 | * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check | 
|  | 2050 | * for buffer overruns. | 
|  | 2051 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2052 | * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware | 
|  | 2053 | * cacheline.  This can be beneficial if you're counting cycles as closely | 
|  | 2054 | * as davem. | 
|  | 2055 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2056 | struct kmem_cache * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2057 | kmem_cache_create (const char *name, size_t size, size_t align, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2058 | unsigned long flags, | 
|  | 2059 | void (*ctor)(void*, struct kmem_cache *, unsigned long), | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2060 | void (*dtor)(void*, struct kmem_cache *, unsigned long)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2061 | { | 
|  | 2062 | size_t left_over, slab_size, ralign; | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 2063 | struct kmem_cache *cachep = NULL, *pc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2064 |  | 
|  | 2065 | /* | 
|  | 2066 | * Sanity checks... these are all serious usage bugs. | 
|  | 2067 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2068 | if (!name || in_interrupt() || (size < BYTES_PER_WORD) || | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2069 | (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2070 | printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, | 
|  | 2071 | name); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2072 | BUG(); | 
|  | 2073 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2074 |  | 
| Ravikiran G Thirumalai | f0188f4 | 2006-02-10 01:51:13 -0800 | [diff] [blame] | 2075 | /* | 
|  | 2076 | * Prevent CPUs from coming and going. | 
|  | 2077 | * lock_cpu_hotplug() nests outside cache_chain_mutex | 
|  | 2078 | */ | 
|  | 2079 | lock_cpu_hotplug(); | 
|  | 2080 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2081 | mutex_lock(&cache_chain_mutex); | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 2082 |  | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 2083 | list_for_each_entry(pc, &cache_chain, next) { | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 2084 | mm_segment_t old_fs = get_fs(); | 
|  | 2085 | char tmp; | 
|  | 2086 | int res; | 
|  | 2087 |  | 
|  | 2088 | /* | 
|  | 2089 | * This happens when the module gets unloaded and doesn't | 
|  | 2090 | * destroy its slab cache and no-one else reuses the vmalloc | 
|  | 2091 | * area of the module.  Print a warning. | 
|  | 2092 | */ | 
|  | 2093 | set_fs(KERNEL_DS); | 
|  | 2094 | res = __get_user(tmp, pc->name); | 
|  | 2095 | set_fs(old_fs); | 
|  | 2096 | if (res) { | 
|  | 2097 | printk("SLAB: cache with size %d has lost its name\n", | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2098 | pc->buffer_size); | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 2099 | continue; | 
|  | 2100 | } | 
|  | 2101 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2102 | if (!strcmp(pc->name, name)) { | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 2103 | printk("kmem_cache_create: duplicate cache %s\n", name); | 
|  | 2104 | dump_stack(); | 
|  | 2105 | goto oops; | 
|  | 2106 | } | 
|  | 2107 | } | 
|  | 2108 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2109 | #if DEBUG | 
|  | 2110 | WARN_ON(strchr(name, ' '));	/* It confuses parsers */ | 
|  | 2111 | if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { | 
|  | 2112 | /* No constructor, but initial state check requested */ | 
|  | 2113 | printk(KERN_ERR "%s: No con, but init state check " | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2114 | "requested - %s\n", __FUNCTION__, name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2115 | flags &= ~SLAB_DEBUG_INITIAL; | 
|  | 2116 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2117 | #if FORCED_DEBUG | 
|  | 2118 | /* | 
|  | 2119 | * Enable redzoning and last user accounting, except for caches with | 
|  | 2120 | * large objects, if the increased size would increase the object size | 
|  | 2121 | * above the next power of two: caches with object sizes just above a | 
|  | 2122 | * power of two have a significant amount of internal fragmentation. | 
|  | 2123 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2124 | if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD)) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2125 | flags |= SLAB_RED_ZONE | SLAB_STORE_USER; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2126 | if (!(flags & SLAB_DESTROY_BY_RCU)) | 
|  | 2127 | flags |= SLAB_POISON; | 
|  | 2128 | #endif | 
|  | 2129 | if (flags & SLAB_DESTROY_BY_RCU) | 
|  | 2130 | BUG_ON(flags & SLAB_POISON); | 
|  | 2131 | #endif | 
|  | 2132 | if (flags & SLAB_DESTROY_BY_RCU) | 
|  | 2133 | BUG_ON(dtor); | 
|  | 2134 |  | 
|  | 2135 | /* | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2136 | * Always check flags; a caller might be expecting debug support which | 
|  | 2137 | * isn't available. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2138 | */ | 
| Eric Sesterhenn | 40094fa | 2006-04-02 13:49:25 +0200 | [diff] [blame] | 2139 | BUG_ON(flags & ~CREATE_MASK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2140 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2141 | /* | 
|  | 2142 | * Check that size is in terms of words.  This is needed to avoid | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2143 | * unaligned accesses for some archs when redzoning is used, and makes | 
|  | 2144 | * sure any on-slab bufctl's are also correctly aligned. | 
|  | 2145 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2146 | if (size & (BYTES_PER_WORD - 1)) { | 
|  | 2147 | size += (BYTES_PER_WORD - 1); | 
|  | 2148 | size &= ~(BYTES_PER_WORD - 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2149 | } | 
|  | 2150 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2151 | /* calculate the final buffer alignment: */ | 
|  | 2152 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2153 | /* 1) arch recommendation: can be overridden for debug */ | 
|  | 2154 | if (flags & SLAB_HWCACHE_ALIGN) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2155 | /* | 
|  | 2156 | * Default alignment: as specified by the arch code.  Except if | 
|  | 2157 | * an object is really small, then squeeze multiple objects into | 
|  | 2158 | * one cacheline. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2159 | */ | 
|  | 2160 | ralign = cache_line_size(); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2161 | while (size <= ralign / 2) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2162 | ralign /= 2; | 
|  | 2163 | } else { | 
|  | 2164 | ralign = BYTES_PER_WORD; | 
|  | 2165 | } | 
| Pekka Enberg | ca5f970 | 2006-09-25 23:31:25 -0700 | [diff] [blame] | 2166 |  | 
|  | 2167 | /* | 
|  | 2168 | * Redzoning and user store require word alignment. Note this will be | 
|  | 2169 | * overridden by architecture or caller mandated alignment if either | 
|  | 2170 | * is greater than BYTES_PER_WORD. | 
|  | 2171 | */ | 
|  | 2172 | if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) | 
|  | 2173 | ralign = BYTES_PER_WORD; | 
|  | 2174 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2175 | /* 2) arch mandated alignment: disables debug if necessary */ | 
|  | 2176 | if (ralign < ARCH_SLAB_MINALIGN) { | 
|  | 2177 | ralign = ARCH_SLAB_MINALIGN; | 
|  | 2178 | if (ralign > BYTES_PER_WORD) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2179 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2180 | } | 
|  | 2181 | /* 3) caller mandated alignment: disables debug if necessary */ | 
|  | 2182 | if (ralign < align) { | 
|  | 2183 | ralign = align; | 
|  | 2184 | if (ralign > BYTES_PER_WORD) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2185 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2186 | } | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2187 | /* | 
| Pekka Enberg | ca5f970 | 2006-09-25 23:31:25 -0700 | [diff] [blame] | 2188 | * 4) Store it. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2189 | */ | 
|  | 2190 | align = ralign; | 
|  | 2191 |  | 
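/*
 * Worked example of the alignment cascade above (illustrative, assuming a
 * 64-byte cache line and no redzoning): a 20-byte object created with
 * SLAB_HWCACHE_ALIGN starts with ralign = 64, which the while loop halves
 * to 32 because the object still fits in half a line (20 <= 32) but not in
 * a quarter (20 > 16); with no larger ARCH_SLAB_MINALIGN or caller-supplied
 * alignment, the final align is therefore 32.
 */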
|  | 2192 | /* Get cache's description obj. */ | 
| Pekka Enberg | c5e3b83 | 2006-03-25 03:06:43 -0800 | [diff] [blame] | 2193 | cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2194 | if (!cachep) | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 2195 | goto oops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2196 |  | 
|  | 2197 | #if DEBUG | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2198 | cachep->obj_size = size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2199 |  | 
| Pekka Enberg | ca5f970 | 2006-09-25 23:31:25 -0700 | [diff] [blame] | 2200 | /* | 
|  | 2201 | * Both debugging options require word-alignment which is calculated | 
|  | 2202 | * into align above. | 
|  | 2203 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2204 | if (flags & SLAB_RED_ZONE) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2205 | /* add space for red zone words */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2206 | cachep->obj_offset += BYTES_PER_WORD; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2207 | size += 2 * BYTES_PER_WORD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2208 | } | 
|  | 2209 | if (flags & SLAB_STORE_USER) { | 
| Pekka Enberg | ca5f970 | 2006-09-25 23:31:25 -0700 | [diff] [blame] | 2210 | /* user store requires one word of storage behind the end of | 
|  | 2211 | * the real object. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2212 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2213 | size += BYTES_PER_WORD; | 
|  | 2214 | } | 
|  | 2215 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2216 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2217 | && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { | 
|  | 2218 | cachep->obj_offset += PAGE_SIZE - size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2219 | size = PAGE_SIZE; | 
|  | 2220 | } | 
|  | 2221 | #endif | 
|  | 2222 | #endif | 
|  | 2223 |  | 
| Ingo Molnar | e0a4272 | 2006-06-23 02:03:46 -0700 | [diff] [blame] | 2224 | /* | 
|  | 2225 | * Determine if the slab management is 'on' or 'off' slab. | 
|  | 2226 | * (bootstrapping cannot cope with offslab caches so don't do | 
|  | 2227 | * it too early on.) | 
|  | 2228 | */ | 
|  | 2229 | if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2230 | /* | 
|  | 2231 | * Size is large, assume best to place the slab management obj | 
|  | 2232 | * off-slab (should allow better packing of objs). | 
|  | 2233 | */ | 
|  | 2234 | flags |= CFLGS_OFF_SLAB; | 
|  | 2235 |  | 
|  | 2236 | size = ALIGN(size, align); | 
|  | 2237 |  | 
| Linus Torvalds | f78bb8a | 2006-03-08 10:33:05 -0800 | [diff] [blame] | 2238 | left_over = calculate_slab_order(cachep, size, align, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2239 |  | 
|  | 2240 | if (!cachep->num) { | 
|  | 2241 | printk("kmem_cache_create: couldn't create cache %s.\n", name); | 
|  | 2242 | kmem_cache_free(&cache_cache, cachep); | 
|  | 2243 | cachep = NULL; | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 2244 | goto oops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2245 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2246 | slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) | 
|  | 2247 | + sizeof(struct slab), align); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 |  | 
|  | 2249 | /* | 
|  | 2250 | * If the slab has been placed off-slab, and we have enough space then | 
|  | 2251 | * move it on-slab. This is at the expense of any extra colouring. | 
|  | 2252 | */ | 
|  | 2253 | if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { | 
|  | 2254 | flags &= ~CFLGS_OFF_SLAB; | 
|  | 2255 | left_over -= slab_size; | 
|  | 2256 | } | 
|  | 2257 |  | 
|  | 2258 | if (flags & CFLGS_OFF_SLAB) { | 
|  | 2259 | /* really off slab. No need for manual alignment */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2260 | slab_size = | 
|  | 2261 | cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2262 | } | 
|  | 2263 |  | 
|  | 2264 | cachep->colour_off = cache_line_size(); | 
|  | 2265 | /* Offset must be a multiple of the alignment. */ | 
|  | 2266 | if (cachep->colour_off < align) | 
|  | 2267 | cachep->colour_off = align; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2268 | cachep->colour = left_over / cachep->colour_off; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2269 | cachep->slab_size = slab_size; | 
|  | 2270 | cachep->flags = flags; | 
|  | 2271 | cachep->gfpflags = 0; | 
|  | 2272 | if (flags & SLAB_CACHE_DMA) | 
|  | 2273 | cachep->gfpflags |= GFP_DMA; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2274 | cachep->buffer_size = size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2275 |  | 
| Ravikiran G Thirumalai | e5ac9c5 | 2006-09-25 23:31:34 -0700 | [diff] [blame] | 2276 | if (flags & CFLGS_OFF_SLAB) { | 
| Victor Fusco | b2d5507 | 2005-09-10 00:26:36 -0700 | [diff] [blame] | 2277 | cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); | 
| Ravikiran G Thirumalai | e5ac9c5 | 2006-09-25 23:31:34 -0700 | [diff] [blame] | 2278 | /* | 
|  | 2279 | * This is a possibility for one of the malloc_sizes caches. | 
|  | 2280 | * But since we go off slab only for object size greater than | 
|  | 2281 | * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, | 
|  | 2282 | * this should not happen at all. | 
|  | 2283 | * But leave a BUG_ON for some lucky dude. | 
|  | 2284 | */ | 
|  | 2285 | BUG_ON(!cachep->slabp_cache); | 
|  | 2286 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2287 | cachep->ctor = ctor; | 
|  | 2288 | cachep->dtor = dtor; | 
|  | 2289 | cachep->name = name; | 
|  | 2290 |  | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 2291 | if (setup_cpu_cache(cachep)) { | 
|  | 2292 | __kmem_cache_destroy(cachep); | 
|  | 2293 | cachep = NULL; | 
|  | 2294 | goto oops; | 
|  | 2295 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2296 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2297 | /* cache setup completed, link it into the list */ | 
|  | 2298 | list_add(&cachep->next, &cache_chain); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2299 | oops: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | if (!cachep && (flags & SLAB_PANIC)) | 
|  | 2301 | panic("kmem_cache_create(): failed to create slab `%s'\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2302 | name); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2303 | mutex_unlock(&cache_chain_mutex); | 
| Ravikiran G Thirumalai | f0188f4 | 2006-02-10 01:51:13 -0800 | [diff] [blame] | 2304 | unlock_cpu_hotplug(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2305 | return cachep; | 
|  | 2306 | } | 
|  | 2307 | EXPORT_SYMBOL(kmem_cache_create); | 
|  | 2308 |  | 
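/*
 * Illustrative usage sketch (not part of slab.c): a hypothetical module
 * creating its own cache with a constructor.  struct foo, foo_cachep and
 * the function names are invented for the example; error handling is
 * minimal.
 */
struct foo {
	int refcount;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	struct foo *f = obj;

	/* Runs once per object when a new slab is populated. */
	f->refcount = 0;
	INIT_LIST_HEAD(&f->list);
}

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return;
	/* ... use f, then hand it back in ctor-initialized state ... */
	kmem_cache_free(foo_cachep, f);
}

static void __exit foo_exit(void)
{
	/* Every object must have been freed before the cache is destroyed. */
	kmem_cache_destroy(foo_cachep);
}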
|  | 2309 | #if DEBUG | 
|  | 2310 | static void check_irq_off(void) | 
|  | 2311 | { | 
|  | 2312 | BUG_ON(!irqs_disabled()); | 
|  | 2313 | } | 
|  | 2314 |  | 
|  | 2315 | static void check_irq_on(void) | 
|  | 2316 | { | 
|  | 2317 | BUG_ON(irqs_disabled()); | 
|  | 2318 | } | 
|  | 2319 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2320 | static void check_spinlock_acquired(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2321 | { | 
|  | 2322 | #ifdef CONFIG_SMP | 
|  | 2323 | check_irq_off(); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2324 | assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2325 | #endif | 
|  | 2326 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2327 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2328 | static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2329 | { | 
|  | 2330 | #ifdef CONFIG_SMP | 
|  | 2331 | check_irq_off(); | 
|  | 2332 | assert_spin_locked(&cachep->nodelists[node]->list_lock); | 
|  | 2333 | #endif | 
|  | 2334 | } | 
|  | 2335 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | #else | 
|  | 2337 | #define check_irq_off()	do { } while(0) | 
|  | 2338 | #define check_irq_on()	do { } while(0) | 
|  | 2339 | #define check_spinlock_acquired(x) do { } while(0) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2340 | #define check_spinlock_acquired_node(x, y) do { } while(0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2341 | #endif | 
|  | 2342 |  | 
| Christoph Lameter | aab2207 | 2006-03-22 00:09:06 -0800 | [diff] [blame] | 2343 | static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | 
|  | 2344 | struct array_cache *ac, | 
|  | 2345 | int force, int node); | 
|  | 2346 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2347 | static void do_drain(void *arg) | 
|  | 2348 | { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2349 | struct kmem_cache *cachep = arg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2350 | struct array_cache *ac; | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2351 | int node = numa_node_id(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2352 |  | 
|  | 2353 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2354 | ac = cpu_cache_get(cachep); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2355 | spin_lock(&cachep->nodelists[node]->list_lock); | 
|  | 2356 | free_block(cachep, ac->entry, ac->avail, node); | 
|  | 2357 | spin_unlock(&cachep->nodelists[node]->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2358 | ac->avail = 0; | 
|  | 2359 | } | 
|  | 2360 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2361 | static void drain_cpu_caches(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2362 | { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2363 | struct kmem_list3 *l3; | 
|  | 2364 | int node; | 
|  | 2365 |  | 
| Andrew Morton | a07fa39 | 2006-03-22 00:08:17 -0800 | [diff] [blame] | 2366 | on_each_cpu(do_drain, cachep, 1, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2367 | check_irq_on(); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2368 | for_each_online_node(node) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2369 | l3 = cachep->nodelists[node]; | 
| Roland Dreier | a4523a8 | 2006-05-15 11:41:00 -0700 | [diff] [blame] | 2370 | if (l3 && l3->alien) | 
|  | 2371 | drain_alien_cache(cachep, l3->alien); | 
|  | 2372 | } | 
|  | 2373 |  | 
|  | 2374 | for_each_online_node(node) { | 
|  | 2375 | l3 = cachep->nodelists[node]; | 
|  | 2376 | if (l3) | 
| Christoph Lameter | aab2207 | 2006-03-22 00:09:06 -0800 | [diff] [blame] | 2377 | drain_array(cachep, l3, l3->shared, 1, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2378 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2379 | } | 
|  | 2380 |  | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2381 | /* | 
|  | 2382 | * Remove slabs from the list of free slabs. | 
|  | 2383 | * Specify the number of slabs to drain in tofree. | 
|  | 2384 | * | 
|  | 2385 | * Returns the actual number of slabs released. | 
|  | 2386 | */ | 
|  | 2387 | static int drain_freelist(struct kmem_cache *cache, | 
|  | 2388 | struct kmem_list3 *l3, int tofree) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 | { | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2390 | struct list_head *p; | 
|  | 2391 | int nr_freed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2392 | struct slab *slabp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2393 |  | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2394 | nr_freed = 0; | 
|  | 2395 | while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2396 |  | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2397 | spin_lock_irq(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2398 | p = l3->slabs_free.prev; | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2399 | if (p == &l3->slabs_free) { | 
|  | 2400 | spin_unlock_irq(&l3->list_lock); | 
|  | 2401 | goto out; | 
|  | 2402 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2403 |  | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2404 | slabp = list_entry(p, struct slab, list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2405 | #if DEBUG | 
| Eric Sesterhenn | 40094fa | 2006-04-02 13:49:25 +0200 | [diff] [blame] | 2406 | BUG_ON(slabp->inuse); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2407 | #endif | 
|  | 2408 | list_del(&slabp->list); | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2409 | /* | 
|  | 2410 | * Safe to drop the lock. The slab is no longer linked | 
|  | 2411 | * to the cache. | 
|  | 2412 | */ | 
|  | 2413 | l3->free_objects -= cache->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2414 | spin_unlock_irq(&l3->list_lock); | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2415 | slab_destroy(cache, slabp); | 
|  | 2416 | nr_freed++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2417 | } | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2418 | out: | 
|  | 2419 | return nr_freed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2420 | } | 
|  | 2421 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2422 | static int __cache_shrink(struct kmem_cache *cachep) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2423 | { | 
|  | 2424 | int ret = 0, i = 0; | 
|  | 2425 | struct kmem_list3 *l3; | 
|  | 2426 |  | 
|  | 2427 | drain_cpu_caches(cachep); | 
|  | 2428 |  | 
|  | 2429 | check_irq_on(); | 
|  | 2430 | for_each_online_node(i) { | 
|  | 2431 | l3 = cachep->nodelists[i]; | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2432 | if (!l3) | 
|  | 2433 | continue; | 
|  | 2434 |  | 
|  | 2435 | drain_freelist(cachep, l3, l3->free_objects); | 
|  | 2436 |  | 
|  | 2437 | ret += !list_empty(&l3->slabs_full) || | 
|  | 2438 | !list_empty(&l3->slabs_partial); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2439 | } | 
|  | 2440 | return (ret ? 1 : 0); | 
|  | 2441 | } | 
|  | 2442 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2443 | /** | 
|  | 2444 | * kmem_cache_shrink - Shrink a cache. | 
|  | 2445 | * @cachep: The cache to shrink. | 
|  | 2446 | * | 
|  | 2447 | * Releases as many slabs as possible for a cache. | 
|  | 2448 | * To help debugging, a zero exit status indicates all slabs were released. | 
|  | 2449 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2450 | int kmem_cache_shrink(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2451 | { | 
| Eric Sesterhenn | 40094fa | 2006-04-02 13:49:25 +0200 | [diff] [blame] | 2452 | BUG_ON(!cachep || in_interrupt()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2453 |  | 
|  | 2454 | return __cache_shrink(cachep); | 
|  | 2455 | } | 
|  | 2456 | EXPORT_SYMBOL(kmem_cache_shrink); | 
|  | 2457 |  | 
|  | 2458 | /** | 
|  | 2459 | * kmem_cache_destroy - delete a cache | 
|  | 2460 | * @cachep: the cache to destroy | 
|  | 2461 | * | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2462 | * Remove a struct kmem_cache object from the slab cache. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2463 | * | 
|  | 2464 | * It is expected this function will be called by a module when it is | 
|  | 2465 | * unloaded.  This will remove the cache completely, and avoid a duplicate | 
|  | 2466 | * cache being allocated each time a module is loaded and unloaded, if the | 
|  | 2467 | * module doesn't have persistent in-kernel storage across loads and unloads. | 
|  | 2468 | * | 
|  | 2469 | * The cache must be empty before calling this function. | 
|  | 2470 | * | 
|  | 2471 | * The caller must guarantee that no one will allocate memory from the cache | 
|  | 2472 | * during the kmem_cache_destroy(). | 
|  | 2473 | */ | 
| Alexey Dobriyan | 133d205 | 2006-09-27 01:49:41 -0700 | [diff] [blame] | 2474 | void kmem_cache_destroy(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2475 | { | 
| Eric Sesterhenn | 40094fa | 2006-04-02 13:49:25 +0200 | [diff] [blame] | 2476 | BUG_ON(!cachep || in_interrupt()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2477 |  | 
|  | 2478 | /* Don't let CPUs come and go */ | 
|  | 2479 | lock_cpu_hotplug(); | 
|  | 2480 |  | 
|  | 2481 | /* Find the cache in the chain of caches. */ | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2482 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2483 | /* | 
|  | 2484 | * The chain is never empty; cache_cache is never destroyed. | 
|  | 2485 | */ | 
|  | 2486 | list_del(&cachep->next); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2487 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 |  | 
|  | 2489 | if (__cache_shrink(cachep)) { | 
|  | 2490 | slab_error(cachep, "Can't free all objects"); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2491 | mutex_lock(&cache_chain_mutex); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2492 | list_add(&cachep->next, &cache_chain); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2493 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2494 | unlock_cpu_hotplug(); | 
| Alexey Dobriyan | 133d205 | 2006-09-27 01:49:41 -0700 | [diff] [blame] | 2495 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2496 | } | 
|  | 2497 |  | 
|  | 2498 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) | 
| Paul E. McKenney | fbd568a3e | 2005-05-01 08:59:04 -0700 | [diff] [blame] | 2499 | synchronize_rcu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2500 |  | 
| Christoph Lameter | 117f6eb | 2006-09-25 23:31:37 -0700 | [diff] [blame] | 2501 | __kmem_cache_destroy(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2502 | unlock_cpu_hotplug(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2503 | } | 
|  | 2504 | EXPORT_SYMBOL(kmem_cache_destroy); | 
|  | 2505 |  | 
| Ravikiran G Thirumalai | e5ac9c5 | 2006-09-25 23:31:34 -0700 | [diff] [blame] | 2506 | /* | 
|  | 2507 | * Get the memory for a slab management obj. | 
|  | 2508 | * For a cache whose slab descriptors are kept off-slab, the descriptors | 
|  | 2509 | * always come from one of the malloc_sizes caches.  A slab descriptor | 
|  | 2510 | * cannot come from the cache that is currently being created because, | 
|  | 2511 | * when we search for an appropriate cache for these descriptors in | 
|  | 2512 | * kmem_cache_create, we go through the malloc_sizes array.  If we were | 
|  | 2513 | * creating a malloc_sizes cache here, it would not be visible to | 
|  | 2514 | * kmem_find_general_cachep until its initialization is complete. | 
|  | 2515 | * Hence slabp_cache can never be the same as the cache being created. | 
|  | 2516 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2517 | static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, | 
| Ravikiran G Thirumalai | 5b74ada | 2006-04-10 22:52:53 -0700 | [diff] [blame] | 2518 | int colour_off, gfp_t local_flags, | 
|  | 2519 | int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2520 | { | 
|  | 2521 | struct slab *slabp; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2522 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2523 | if (OFF_SLAB(cachep)) { | 
|  | 2524 | /* Slab management obj is off-slab. */ | 
| Ravikiran G Thirumalai | 5b74ada | 2006-04-10 22:52:53 -0700 | [diff] [blame] | 2525 | slabp = kmem_cache_alloc_node(cachep->slabp_cache, | 
|  | 2526 | local_flags, nodeid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2527 | if (!slabp) | 
|  | 2528 | return NULL; | 
|  | 2529 | } else { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2530 | slabp = objp + colour_off; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2531 | colour_off += cachep->slab_size; | 
|  | 2532 | } | 
|  | 2533 | slabp->inuse = 0; | 
|  | 2534 | slabp->colouroff = colour_off; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2535 | slabp->s_mem = objp + colour_off; | 
| Ravikiran G Thirumalai | 5b74ada | 2006-04-10 22:52:53 -0700 | [diff] [blame] | 2536 | slabp->nodeid = nodeid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2537 | return slabp; | 
|  | 2538 | } | 
|  | 2539 |  | 
|  | 2540 | static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) | 
|  | 2541 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2542 | return (kmem_bufctl_t *) (slabp + 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2543 | } | 
|  | 2544 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2545 | static void cache_init_objs(struct kmem_cache *cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2546 | struct slab *slabp, unsigned long ctor_flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2547 | { | 
|  | 2548 | int i; | 
|  | 2549 |  | 
|  | 2550 | for (i = 0; i < cachep->num; i++) { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 2551 | void *objp = index_to_obj(cachep, slabp, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2552 | #if DEBUG | 
|  | 2553 | /* need to poison the objs? */ | 
|  | 2554 | if (cachep->flags & SLAB_POISON) | 
|  | 2555 | poison_obj(cachep, objp, POISON_FREE); | 
|  | 2556 | if (cachep->flags & SLAB_STORE_USER) | 
|  | 2557 | *dbg_userword(cachep, objp) = NULL; | 
|  | 2558 |  | 
|  | 2559 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 2560 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | 
|  | 2561 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | 
|  | 2562 | } | 
|  | 2563 | /* | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2564 | * Constructors are not allowed to allocate memory from the same | 
|  | 2565 | * cache which they are a constructor for.  Otherwise, deadlock. | 
|  | 2566 | * They must also be threaded. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2567 | */ | 
|  | 2568 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2569 | cachep->ctor(objp + obj_offset(cachep), cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2570 | ctor_flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2571 |  | 
|  | 2572 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 2573 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 
|  | 2574 | slab_error(cachep, "constructor overwrote the" | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2575 | " end of an object"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2576 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | 
|  | 2577 | slab_error(cachep, "constructor overwrote the" | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2578 | " start of an object"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2579 | } | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2580 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && | 
|  | 2581 | OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2582 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2583 | cachep->buffer_size / PAGE_SIZE, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2584 | #else | 
|  | 2585 | if (cachep->ctor) | 
|  | 2586 | cachep->ctor(objp, cachep, ctor_flags); | 
|  | 2587 | #endif | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2588 | slab_bufctl(slabp)[i] = i + 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2589 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2590 | slab_bufctl(slabp)[i - 1] = BUFCTL_END; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2591 | slabp->free = 0; | 
|  | 2592 | } | 
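/*
 * Worked example of the free list built above (derived from the code, for
 * illustration): with cachep->num == 4 the bufctl array ends up as
 * { 1, 2, 3, BUFCTL_END } and slabp->free == 0, i.e. a singly linked list
 * of object indices threaded through the array.  slab_get_obj() below pops
 * the head (object 0, then 1, ...) and slab_put_obj() pushes a freed
 * object's index back onto the head.
 */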
|  | 2593 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2594 | static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2595 | { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2596 | if (flags & SLAB_DMA) | 
|  | 2597 | BUG_ON(!(cachep->gfpflags & GFP_DMA)); | 
|  | 2598 | else | 
|  | 2599 | BUG_ON(cachep->gfpflags & GFP_DMA); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2600 | } | 
|  | 2601 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2602 | static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, | 
|  | 2603 | int nodeid) | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2604 | { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 2605 | void *objp = index_to_obj(cachep, slabp, slabp->free); | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2606 | kmem_bufctl_t next; | 
|  | 2607 |  | 
|  | 2608 | slabp->inuse++; | 
|  | 2609 | next = slab_bufctl(slabp)[slabp->free]; | 
|  | 2610 | #if DEBUG | 
|  | 2611 | slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; | 
|  | 2612 | WARN_ON(slabp->nodeid != nodeid); | 
|  | 2613 | #endif | 
|  | 2614 | slabp->free = next; | 
|  | 2615 |  | 
|  | 2616 | return objp; | 
|  | 2617 | } | 
|  | 2618 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2619 | static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, | 
|  | 2620 | void *objp, int nodeid) | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2621 | { | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 2622 | unsigned int objnr = obj_to_index(cachep, slabp, objp); | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2623 |  | 
|  | 2624 | #if DEBUG | 
|  | 2625 | /* Verify that the slab belongs to the intended node */ | 
|  | 2626 | WARN_ON(slabp->nodeid != nodeid); | 
|  | 2627 |  | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 2628 | if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2629 | printk(KERN_ERR "slab: double free detected in cache " | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2630 | "'%s', objp %p\n", cachep->name, objp); | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2631 | BUG(); | 
|  | 2632 | } | 
|  | 2633 | #endif | 
|  | 2634 | slab_bufctl(slabp)[objnr] = slabp->free; | 
|  | 2635 | slabp->free = objnr; | 
|  | 2636 | slabp->inuse--; | 
|  | 2637 | } | 
|  | 2638 |  | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2639 | /* | 
|  | 2640 | * Map pages beginning at addr to the given cache and slab. This is required | 
|  | 2641 | * for the slab allocator to be able to look up the cache and slab of a | 
|  | 2642 | * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. | 
|  | 2643 | */ | 
|  | 2644 | static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, | 
|  | 2645 | void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2646 | { | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2647 | int nr_pages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2648 | struct page *page; | 
|  | 2649 |  | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2650 | page = virt_to_page(addr); | 
| Nick Piggin | 8409751 | 2006-03-22 00:08:34 -0800 | [diff] [blame] | 2651 |  | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2652 | nr_pages = 1; | 
| Nick Piggin | 8409751 | 2006-03-22 00:08:34 -0800 | [diff] [blame] | 2653 | if (likely(!PageCompound(page))) | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2654 | nr_pages <<= cache->gfporder; | 
|  | 2655 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2656 | do { | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2657 | page_set_cache(page, cache); | 
|  | 2658 | page_set_slab(page, slab); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2659 | page++; | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2660 | } while (--nr_pages); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2661 | } | 
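|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not part of the original source): once the pages of a | 
|  |  | * slab have been mapped like this, the owning cache and slab can be recovered | 
|  |  | * from any object pointer, which is what the free paths rely on: | 
|  |  | * | 
|  |  | *	struct page *page = virt_to_page(objp); | 
|  |  | *	struct kmem_cache *cachep = page_get_cache(page); | 
|  |  | *	struct slab *slabp = page_get_slab(page); | 
|  |  | * | 
|  |  | * virt_to_cache() and virt_to_slab() used elsewhere in this file are thin | 
|  |  | * wrappers around exactly this lookup. | 
|  |  | */ | 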
|  | 2662 |  | 
|  | 2663 | /* | 
|  | 2664 | * Grow (by 1) the number of slabs within a cache.  This is called by | 
|  | 2665 | * kmem_cache_alloc() when there are no active objs left in a cache. | 
|  | 2666 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2667 | static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2668 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2669 | struct slab *slabp; | 
|  | 2670 | void *objp; | 
|  | 2671 | size_t offset; | 
|  | 2672 | gfp_t local_flags; | 
|  | 2673 | unsigned long ctor_flags; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2674 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2676 | /* | 
|  | 2677 | * Be lazy and only check for valid flags here, keeping it out of the | 
|  | 2678 | * critical path in kmem_cache_alloc(). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2679 | */ | 
| Eric Sesterhenn | 40094fa | 2006-04-02 13:49:25 +0200 | [diff] [blame] | 2680 | BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2681 | if (flags & SLAB_NO_GROW) | 
|  | 2682 | return 0; | 
|  | 2683 |  | 
|  | 2684 | ctor_flags = SLAB_CTOR_CONSTRUCTOR; | 
|  | 2685 | local_flags = (flags & SLAB_LEVEL_MASK); | 
|  | 2686 | if (!(local_flags & __GFP_WAIT)) | 
|  | 2687 | /* | 
|  | 2688 | * Not allowed to sleep.  Need to tell a constructor about | 
|  | 2689 | * this - it might need to know... | 
|  | 2690 | */ | 
|  | 2691 | ctor_flags |= SLAB_CTOR_ATOMIC; | 
|  | 2692 |  | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2693 | /* Take the l3 list lock to change the colour_next on this node */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2694 | check_irq_off(); | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2695 | l3 = cachep->nodelists[nodeid]; | 
|  | 2696 | spin_lock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2697 |  | 
|  | 2698 | /* Get colour for the slab, and calculate the next value. */ | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2699 | offset = l3->colour_next; | 
|  | 2700 | l3->colour_next++; | 
|  | 2701 | if (l3->colour_next >= cachep->colour) | 
|  | 2702 | l3->colour_next = 0; | 
|  | 2703 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2704 |  | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2705 | offset *= cachep->colour_off; | 
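|  |  |  | 
|  |  | /* | 
|  |  | * Worked example (illustrative numbers, not from the original source): with | 
|  |  | * cachep->colour == 4 and cachep->colour_off == 32 (say, one cache line), | 
|  |  | * successive slabs grown on this node place their first object at byte | 
|  |  | * offsets 0, 32, 64, 96, 0, ... so that objects of different slabs do not | 
|  |  | * all compete for the same cache lines. | 
|  |  | */ | 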
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2706 |  | 
|  | 2707 | if (local_flags & __GFP_WAIT) | 
|  | 2708 | local_irq_enable(); | 
|  | 2709 |  | 
|  | 2710 | /* | 
|  | 2711 | * The test for missing atomic flag is performed here, rather than | 
|  | 2712 | * the more obvious place, simply to reduce the critical path length | 
|  | 2713 | * in kmem_cache_alloc(). If a caller is seriously misbehaving they | 
|  | 2714 | * will eventually be caught here (where it matters). | 
|  | 2715 | */ | 
|  | 2716 | kmem_flagcheck(cachep, flags); | 
|  | 2717 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2718 | /* | 
|  | 2719 | * Get mem for the objs.  Attempt to allocate a physical page from | 
|  | 2720 | * 'nodeid'. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2721 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2722 | objp = kmem_getpages(cachep, flags, nodeid); | 
|  | 2723 | if (!objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2724 | goto failed; | 
|  | 2725 |  | 
|  | 2726 | /* Get slab management. */ | 
| Ravikiran G Thirumalai | 5b74ada | 2006-04-10 22:52:53 -0700 | [diff] [blame] | 2727 | slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2728 | if (!slabp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2729 | goto opps1; | 
|  | 2730 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2731 | slabp->nodeid = nodeid; | 
| Pekka Enberg | 4776874 | 2006-06-23 02:03:07 -0700 | [diff] [blame] | 2732 | slab_map_pages(cachep, slabp, objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2733 |  | 
|  | 2734 | cache_init_objs(cachep, slabp, ctor_flags); | 
|  | 2735 |  | 
|  | 2736 | if (local_flags & __GFP_WAIT) | 
|  | 2737 | local_irq_disable(); | 
|  | 2738 | check_irq_off(); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2739 | spin_lock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2740 |  | 
|  | 2741 | /* Make slab active. */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2742 | list_add_tail(&slabp->list, &(l3->slabs_free)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2743 | STATS_INC_GROWN(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2744 | l3->free_objects += cachep->num; | 
|  | 2745 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2746 | return 1; | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2747 | opps1: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2748 | kmem_freepages(cachep, objp); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2749 | failed: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2750 | if (local_flags & __GFP_WAIT) | 
|  | 2751 | local_irq_disable(); | 
|  | 2752 | return 0; | 
|  | 2753 | } | 
|  | 2754 |  | 
|  | 2755 | #if DEBUG | 
|  | 2756 |  | 
|  | 2757 | /* | 
|  | 2758 | * Perform extra freeing checks: | 
|  | 2759 | * - detect bad pointers. | 
|  | 2760 | * - POISON/RED_ZONE checking | 
|  | 2761 | * - destructor calls, for caches with POISON+dtor | 
|  | 2762 | */ | 
|  | 2763 | static void kfree_debugcheck(const void *objp) | 
|  | 2764 | { | 
|  | 2765 | struct page *page; | 
|  | 2766 |  | 
|  | 2767 | if (!virt_addr_valid(objp)) { | 
|  | 2768 | printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2769 | (unsigned long)objp); | 
|  | 2770 | BUG(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2771 | } | 
|  | 2772 | page = virt_to_page(objp); | 
|  | 2773 | if (!PageSlab(page)) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2774 | printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", | 
|  | 2775 | (unsigned long)objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2776 | BUG(); | 
|  | 2777 | } | 
|  | 2778 | } | 
|  | 2779 |  | 
| Pekka Enberg | 58ce1fd | 2006-06-23 02:03:24 -0700 | [diff] [blame] | 2780 | static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) | 
|  | 2781 | { | 
|  | 2782 | unsigned long redzone1, redzone2; | 
|  | 2783 |  | 
|  | 2784 | redzone1 = *dbg_redzone1(cache, obj); | 
|  | 2785 | redzone2 = *dbg_redzone2(cache, obj); | 
|  | 2786 |  | 
|  | 2787 | /* | 
|  | 2788 | * Redzone is ok. | 
|  | 2789 | */ | 
|  | 2790 | if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) | 
|  | 2791 | return; | 
|  | 2792 |  | 
|  | 2793 | if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) | 
|  | 2794 | slab_error(cache, "double free detected"); | 
|  | 2795 | else | 
|  | 2796 | slab_error(cache, "memory outside object was overwritten"); | 
|  | 2797 |  | 
|  | 2798 | printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n", | 
|  | 2799 | obj, redzone1, redzone2); | 
|  | 2800 | } | 
|  | 2801 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2802 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2803 | void *caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2804 | { | 
|  | 2805 | struct page *page; | 
|  | 2806 | unsigned int objnr; | 
|  | 2807 | struct slab *slabp; | 
|  | 2808 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2809 | objp -= obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2810 | kfree_debugcheck(objp); | 
|  | 2811 | page = virt_to_page(objp); | 
|  | 2812 |  | 
| Pekka Enberg | 065d41c | 2005-11-13 16:06:46 -0800 | [diff] [blame] | 2813 | slabp = page_get_slab(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2814 |  | 
|  | 2815 | if (cachep->flags & SLAB_RED_ZONE) { | 
| Pekka Enberg | 58ce1fd | 2006-06-23 02:03:24 -0700 | [diff] [blame] | 2816 | verify_redzone_free(cachep, objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2817 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | 
|  | 2818 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | 
|  | 2819 | } | 
|  | 2820 | if (cachep->flags & SLAB_STORE_USER) | 
|  | 2821 | *dbg_userword(cachep, objp) = caller; | 
|  | 2822 |  | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 2823 | objnr = obj_to_index(cachep, slabp, objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2824 |  | 
|  | 2825 | BUG_ON(objnr >= cachep->num); | 
| Pekka Enberg | 8fea4e9 | 2006-03-22 00:08:10 -0800 | [diff] [blame] | 2826 | BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2827 |  | 
|  | 2828 | if (cachep->flags & SLAB_DEBUG_INITIAL) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2829 | /* | 
|  | 2830 | * Need to call the slab's constructor so the caller can | 
|  | 2831 | * perform a verify of its state (debugging).  Called without | 
|  | 2832 | * the cache-lock held. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2833 | */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2834 | cachep->ctor(objp + obj_offset(cachep), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2835 | cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2836 | } | 
|  | 2837 | if (cachep->flags & SLAB_POISON && cachep->dtor) { | 
|  | 2838 | /* We want to cache poison the object, so | 
|  | 2839 | * call the destruction callback. | 
|  | 2840 | */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2841 | cachep->dtor(objp + obj_offset(cachep), cachep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2842 | } | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 2843 | #ifdef CONFIG_DEBUG_SLAB_LEAK | 
|  | 2844 | slab_bufctl(slabp)[objnr] = BUFCTL_FREE; | 
|  | 2845 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2846 | if (cachep->flags & SLAB_POISON) { | 
|  | 2847 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2848 | if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2849 | store_stackinfo(cachep, objp, (unsigned long)caller); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2850 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2851 | cachep->buffer_size / PAGE_SIZE, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2852 | } else { | 
|  | 2853 | poison_obj(cachep, objp, POISON_FREE); | 
|  | 2854 | } | 
|  | 2855 | #else | 
|  | 2856 | poison_obj(cachep, objp, POISON_FREE); | 
|  | 2857 | #endif | 
|  | 2858 | } | 
|  | 2859 | return objp; | 
|  | 2860 | } | 
|  | 2861 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2862 | static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2863 | { | 
|  | 2864 | kmem_bufctl_t i; | 
|  | 2865 | int entries = 0; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2866 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2867 | /* Check slab's freelist to see if this obj is there. */ | 
|  | 2868 | for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { | 
|  | 2869 | entries++; | 
|  | 2870 | if (entries > cachep->num || i >= cachep->num) | 
|  | 2871 | goto bad; | 
|  | 2872 | } | 
|  | 2873 | if (entries != cachep->num - slabp->inuse) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2874 | bad: | 
|  | 2875 | printk(KERN_ERR "slab: Internal list corruption detected in " | 
|  | 2876 | "cache '%s'(%d), slabp %p(%d). Hexdump:\n", | 
|  | 2877 | cachep->name, cachep->num, slabp, slabp->inuse); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2878 | for (i = 0; | 
| Linus Torvalds | 264132b | 2006-03-06 12:10:07 -0800 | [diff] [blame] | 2879 | i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2880 | i++) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2881 | if (i % 16 == 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2882 | printk("\n%03x:", i); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2883 | printk(" %02x", ((unsigned char *)slabp)[i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2884 | } | 
|  | 2885 | printk("\n"); | 
|  | 2886 | BUG(); | 
|  | 2887 | } | 
|  | 2888 | } | 
|  | 2889 | #else | 
|  | 2890 | #define kfree_debugcheck(x) do { } while(0) | 
|  | 2891 | #define cache_free_debugcheck(x,objp,z) (objp) | 
|  | 2892 | #define check_slabp(x,y) do { } while(0) | 
|  | 2893 | #endif | 
|  | 2894 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2895 | static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2896 | { | 
|  | 2897 | int batchcount; | 
|  | 2898 | struct kmem_list3 *l3; | 
|  | 2899 | struct array_cache *ac; | 
|  | 2900 |  | 
|  | 2901 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2902 | ac = cpu_cache_get(cachep); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2903 | retry: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2904 | batchcount = ac->batchcount; | 
|  | 2905 | if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2906 | /* | 
|  | 2907 | * If there was little recent activity on this cache, then | 
|  | 2908 | * perform only a partial refill.  Otherwise we could generate | 
|  | 2909 | * refill bouncing. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2910 | */ | 
|  | 2911 | batchcount = BATCHREFILL_LIMIT; | 
|  | 2912 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2913 | l3 = cachep->nodelists[numa_node_id()]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2914 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2915 | BUG_ON(ac->avail > 0 || !l3); | 
|  | 2916 | spin_lock(&l3->list_lock); | 
|  | 2917 |  | 
| Christoph Lameter | 3ded175 | 2006-03-25 03:06:44 -0800 | [diff] [blame] | 2918 | /* See if we can refill from the shared array */ | 
|  | 2919 | if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) | 
|  | 2920 | goto alloc_done; | 
|  | 2921 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2922 | while (batchcount > 0) { | 
|  | 2923 | struct list_head *entry; | 
|  | 2924 | struct slab *slabp; | 
|  | 2925 | /* Get the slab that the allocation is to come from. */ | 
|  | 2926 | entry = l3->slabs_partial.next; | 
|  | 2927 | if (entry == &l3->slabs_partial) { | 
|  | 2928 | l3->free_touched = 1; | 
|  | 2929 | entry = l3->slabs_free.next; | 
|  | 2930 | if (entry == &l3->slabs_free) | 
|  | 2931 | goto must_grow; | 
|  | 2932 | } | 
|  | 2933 |  | 
|  | 2934 | slabp = list_entry(entry, struct slab, list); | 
|  | 2935 | check_slabp(cachep, slabp); | 
|  | 2936 | check_spinlock_acquired(cachep); | 
|  | 2937 | while (slabp->inuse < cachep->num && batchcount--) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2938 | STATS_INC_ALLOCED(cachep); | 
|  | 2939 | STATS_INC_ACTIVE(cachep); | 
|  | 2940 | STATS_SET_HIGH(cachep); | 
|  | 2941 |  | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2942 | ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, | 
|  | 2943 | numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2944 | } | 
|  | 2945 | check_slabp(cachep, slabp); | 
|  | 2946 |  | 
|  | 2947 | /* move slabp to correct slabp list: */ | 
|  | 2948 | list_del(&slabp->list); | 
|  | 2949 | if (slabp->free == BUFCTL_END) | 
|  | 2950 | list_add(&slabp->list, &l3->slabs_full); | 
|  | 2951 | else | 
|  | 2952 | list_add(&slabp->list, &l3->slabs_partial); | 
|  | 2953 | } | 
|  | 2954 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2955 | must_grow: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2956 | l3->free_objects -= ac->avail; | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2957 | alloc_done: | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2958 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2959 |  | 
|  | 2960 | if (unlikely(!ac->avail)) { | 
|  | 2961 | int x; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2962 | x = cache_grow(cachep, flags, numa_node_id()); | 
|  | 2963 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2964 | * cache_grow() can re-enable interrupts, so ac could have changed. | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2965 | ac = cpu_cache_get(cachep); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2966 | if (!x && ac->avail == 0)	/* no objects in sight? abort */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2967 | return NULL; | 
|  | 2968 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2969 | if (!ac->avail)		/* objects refilled by interrupt? */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2970 | goto retry; | 
|  | 2971 | } | 
|  | 2972 | ac->touched = 1; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2973 | return ac->entry[--ac->avail]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2974 | } | 
|  | 2975 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2976 | static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, | 
|  | 2977 | gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2978 | { | 
|  | 2979 | might_sleep_if(flags & __GFP_WAIT); | 
|  | 2980 | #if DEBUG | 
|  | 2981 | kmem_flagcheck(cachep, flags); | 
|  | 2982 | #endif | 
|  | 2983 | } | 
|  | 2984 |  | 
|  | 2985 | #if DEBUG | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 2986 | static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | 
|  | 2987 | gfp_t flags, void *objp, void *caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2988 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2989 | if (!objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2990 | return objp; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2991 | if (cachep->flags & SLAB_POISON) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2992 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2993 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2994 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2995 | cachep->buffer_size / PAGE_SIZE, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 | else | 
|  | 2997 | check_poison_obj(cachep, objp); | 
|  | 2998 | #else | 
|  | 2999 | check_poison_obj(cachep, objp); | 
|  | 3000 | #endif | 
|  | 3001 | poison_obj(cachep, objp, POISON_INUSE); | 
|  | 3002 | } | 
|  | 3003 | if (cachep->flags & SLAB_STORE_USER) | 
|  | 3004 | *dbg_userword(cachep, objp) = caller; | 
|  | 3005 |  | 
|  | 3006 | if (cachep->flags & SLAB_RED_ZONE) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3007 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || | 
|  | 3008 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { | 
|  | 3009 | slab_error(cachep, "double free, or memory outside" | 
|  | 3010 | " object was overwritten"); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3011 | printk(KERN_ERR | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3012 | "%p: redzone 1:0x%lx, redzone 2:0x%lx\n", | 
|  | 3013 | objp, *dbg_redzone1(cachep, objp), | 
|  | 3014 | *dbg_redzone2(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3015 | } | 
|  | 3016 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; | 
|  | 3017 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; | 
|  | 3018 | } | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 3019 | #ifdef CONFIG_DEBUG_SLAB_LEAK | 
|  | 3020 | { | 
|  | 3021 | struct slab *slabp; | 
|  | 3022 | unsigned objnr; | 
|  | 3023 |  | 
|  | 3024 | slabp = page_get_slab(virt_to_page(objp)); | 
|  | 3025 | objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; | 
|  | 3026 | slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; | 
|  | 3027 | } | 
|  | 3028 | #endif | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3029 | objp += obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3030 | if (cachep->ctor && cachep->flags & SLAB_POISON) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3031 | unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3032 |  | 
|  | 3033 | if (!(flags & __GFP_WAIT)) | 
|  | 3034 | ctor_flags |= SLAB_CTOR_ATOMIC; | 
|  | 3035 |  | 
|  | 3036 | cachep->ctor(objp, cachep, ctor_flags); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3037 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3038 | return objp; | 
|  | 3039 | } | 
|  | 3040 | #else | 
|  | 3041 | #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) | 
|  | 3042 | #endif | 
|  | 3043 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3044 | static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3045 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3046 | void *objp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3047 | struct array_cache *ac; | 
|  | 3048 |  | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 3049 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3050 | ac = cpu_cache_get(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3051 | if (likely(ac->avail)) { | 
|  | 3052 | STATS_INC_ALLOCHIT(cachep); | 
|  | 3053 | ac->touched = 1; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3054 | objp = ac->entry[--ac->avail]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3055 | } else { | 
|  | 3056 | STATS_INC_ALLOCMISS(cachep); | 
|  | 3057 | objp = cache_alloc_refill(cachep, flags); | 
|  | 3058 | } | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 3059 | return objp; | 
|  | 3060 | } | 
|  | 3061 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3062 | static __always_inline void *__cache_alloc(struct kmem_cache *cachep, | 
|  | 3063 | gfp_t flags, void *caller) | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 3064 | { | 
|  | 3065 | unsigned long save_flags; | 
| Christoph Lameter | de3083e | 2006-09-27 01:50:03 -0700 | [diff] [blame] | 3066 | void *objp = NULL; | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 3067 |  | 
|  | 3068 | cache_alloc_debugcheck_before(cachep, flags); | 
|  | 3069 |  | 
|  | 3070 | local_irq_save(save_flags); | 
| Christoph Lameter | de3083e | 2006-09-27 01:50:03 -0700 | [diff] [blame] | 3071 |  | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3072 | if (unlikely(NUMA_BUILD && | 
|  | 3073 | current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) | 
| Christoph Lameter | de3083e | 2006-09-27 01:50:03 -0700 | [diff] [blame] | 3074 | objp = alternate_node_alloc(cachep, flags); | 
| Christoph Lameter | de3083e | 2006-09-27 01:50:03 -0700 | [diff] [blame] | 3075 |  | 
|  | 3076 | if (!objp) | 
|  | 3077 | objp = ____cache_alloc(cachep, flags); | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3078 | /* | 
|  | 3079 | * We may just have run out of memory on the local node. | 
|  | 3080 | * __cache_alloc_node() knows how to locate memory on other nodes. | 
|  | 3081 | */ | 
|  | 3082 | if (NUMA_BUILD && !objp) | 
|  | 3083 | objp = __cache_alloc_node(cachep, flags, numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3084 | local_irq_restore(save_flags); | 
| Eric Dumazet | 34342e8 | 2005-09-03 15:55:06 -0700 | [diff] [blame] | 3085 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3086 | caller); | 
| Eric Dumazet | 34342e8 | 2005-09-03 15:55:06 -0700 | [diff] [blame] | 3087 | prefetchw(objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3088 | return objp; | 
|  | 3089 | } | 
|  | 3090 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3091 | #ifdef CONFIG_NUMA | 
|  | 3092 | /* | 
| Paul Jackson | b245539 | 2006-03-24 03:16:12 -0800 | [diff] [blame] | 3093 | * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. | 
| Paul Jackson | c61afb1 | 2006-03-24 03:16:08 -0800 | [diff] [blame] | 3094 | * | 
|  | 3095 | * If we are in_interrupt, then process context, including cpusets and | 
|  | 3096 | * mempolicy, may not apply and should not be used for allocation policy. | 
|  | 3097 | */ | 
|  | 3098 | static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) | 
|  | 3099 | { | 
|  | 3100 | int nid_alloc, nid_here; | 
|  | 3101 |  | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3102 | if (in_interrupt() || (flags & __GFP_THISNODE)) | 
| Paul Jackson | c61afb1 | 2006-03-24 03:16:08 -0800 | [diff] [blame] | 3103 | return NULL; | 
|  | 3104 | nid_alloc = nid_here = numa_node_id(); | 
|  | 3105 | if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) | 
|  | 3106 | nid_alloc = cpuset_mem_spread_node(); | 
|  | 3107 | else if (current->mempolicy) | 
|  | 3108 | nid_alloc = slab_node(current->mempolicy); | 
|  | 3109 | if (nid_alloc != nid_here) | 
|  | 3110 | return __cache_alloc_node(cachep, flags, nid_alloc); | 
|  | 3111 | return NULL; | 
|  | 3112 | } | 
|  | 3113 |  | 
|  | 3114 | /* | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3115 | * Fallback function if there was no memory available and no objects on a | 
|  | 3116 | * certain node and we are allowed to fall back. We mimic the behavior of | 
|  | 3117 | * the page allocator. We fall back according to a zonelist determined by | 
|  | 3118 | * the policy layer while obeying cpuset constraints. | 
|  | 3119 | */ | 
|  | 3120 | void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) | 
|  | 3121 | { | 
|  | 3122 | struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy)) | 
|  | 3123 | ->node_zonelists[gfp_zone(flags)]; | 
|  | 3124 | struct zone **z; | 
|  | 3125 | void *obj = NULL; | 
|  | 3126 |  | 
|  | 3127 | for (z = zonelist->zones; *z && !obj; z++) | 
|  | 3128 | if (zone_idx(*z) <= ZONE_NORMAL && | 
|  | 3129 | cpuset_zone_allowed(*z, flags)) | 
|  | 3130 | obj = __cache_alloc_node(cache, | 
|  | 3131 | flags | __GFP_THISNODE, | 
|  | 3132 | zone_to_nid(*z)); | 
|  | 3133 | return obj; | 
|  | 3134 | } | 
|  | 3135 |  | 
|  | 3136 | /* | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3137 | * An interface to enable slab creation on nodeid | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3138 | */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3139 | static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | 
|  | 3140 | int nodeid) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3141 | { | 
|  | 3142 | struct list_head *entry; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3143 | struct slab *slabp; | 
|  | 3144 | struct kmem_list3 *l3; | 
|  | 3145 | void *obj; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3146 | int x; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3147 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3148 | l3 = cachep->nodelists[nodeid]; | 
|  | 3149 | BUG_ON(!l3); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3150 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3151 | retry: | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 3152 | check_irq_off(); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3153 | spin_lock(&l3->list_lock); | 
|  | 3154 | entry = l3->slabs_partial.next; | 
|  | 3155 | if (entry == &l3->slabs_partial) { | 
|  | 3156 | l3->free_touched = 1; | 
|  | 3157 | entry = l3->slabs_free.next; | 
|  | 3158 | if (entry == &l3->slabs_free) | 
|  | 3159 | goto must_grow; | 
|  | 3160 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3161 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3162 | slabp = list_entry(entry, struct slab, list); | 
|  | 3163 | check_spinlock_acquired_node(cachep, nodeid); | 
|  | 3164 | check_slabp(cachep, slabp); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3165 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3166 | STATS_INC_NODEALLOCS(cachep); | 
|  | 3167 | STATS_INC_ACTIVE(cachep); | 
|  | 3168 | STATS_SET_HIGH(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3169 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3170 | BUG_ON(slabp->inuse == cachep->num); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3171 |  | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 3172 | obj = slab_get_obj(cachep, slabp, nodeid); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3173 | check_slabp(cachep, slabp); | 
|  | 3174 | l3->free_objects--; | 
|  | 3175 | /* move slabp to correct slabp list: */ | 
|  | 3176 | list_del(&slabp->list); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3177 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3178 | if (slabp->free == BUFCTL_END) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3179 | list_add(&slabp->list, &l3->slabs_full); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3180 | else | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3181 | list_add(&slabp->list, &l3->slabs_partial); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3182 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3183 | spin_unlock(&l3->list_lock); | 
|  | 3184 | goto done; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3185 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3186 | must_grow: | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3187 | spin_unlock(&l3->list_lock); | 
|  | 3188 | x = cache_grow(cachep, flags, nodeid); | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3189 | if (x) | 
|  | 3190 | goto retry; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3191 |  | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3192 | if (!(flags & __GFP_THISNODE)) | 
|  | 3193 | /* Unable to grow the cache. Fall back to other nodes. */ | 
|  | 3194 | return fallback_alloc(cachep, flags); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3195 |  | 
| Christoph Lameter | 765c450 | 2006-09-27 01:50:08 -0700 | [diff] [blame] | 3196 | return NULL; | 
|  | 3197 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3198 | done: | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3199 | return obj; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3200 | } | 
|  | 3201 | #endif | 
|  | 3202 |  | 
|  | 3203 | /* | 
|  | 3204 | * Caller needs to acquire correct kmem_list's list_lock | 
|  | 3205 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3206 | static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3207 | int node) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3208 | { | 
|  | 3209 | int i; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3210 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3211 |  | 
|  | 3212 | for (i = 0; i < nr_objects; i++) { | 
|  | 3213 | void *objp = objpp[i]; | 
|  | 3214 | struct slab *slabp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3215 |  | 
| Pekka Enberg | 6ed5eb2 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3216 | slabp = virt_to_slab(objp); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3217 | l3 = cachep->nodelists[node]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3218 | list_del(&slabp->list); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3219 | check_spinlock_acquired_node(cachep, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3220 | check_slabp(cachep, slabp); | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 3221 | slab_put_obj(cachep, slabp, objp, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3222 | STATS_DEC_ACTIVE(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3223 | l3->free_objects++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3224 | check_slabp(cachep, slabp); | 
|  | 3225 |  | 
|  | 3226 | /* fixup slab chains */ | 
|  | 3227 | if (slabp->inuse == 0) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3228 | if (l3->free_objects > l3->free_limit) { | 
|  | 3229 | l3->free_objects -= cachep->num; | 
| Ravikiran G Thirumalai | e5ac9c5 | 2006-09-25 23:31:34 -0700 | [diff] [blame] | 3230 | /* No need to drop any previously held | 
|  | 3231 | * lock here; even if we have an off-slab slab | 
|  | 3232 | * descriptor, it is guaranteed to come from | 
|  | 3233 | * a different cache (refer to the comments | 
|  | 3234 | * before alloc_slabmgmt()). | 
|  | 3235 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3236 | slab_destroy(cachep, slabp); | 
|  | 3237 | } else { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3238 | list_add(&slabp->list, &l3->slabs_free); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3239 | } | 
|  | 3240 | } else { | 
|  | 3241 | /* Unconditionally move a slab to the end of the | 
|  | 3242 | * partial list on free - this gives the remaining | 
|  | 3243 | * objects in the slab maximum time to be freed, too. | 
|  | 3244 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3245 | list_add_tail(&slabp->list, &l3->slabs_partial); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3246 | } | 
|  | 3247 | } | 
|  | 3248 | } | 
|  | 3249 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3250 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3251 | { | 
|  | 3252 | int batchcount; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3253 | struct kmem_list3 *l3; | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3254 | int node = numa_node_id(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3255 |  | 
|  | 3256 | batchcount = ac->batchcount; | 
|  | 3257 | #if DEBUG | 
|  | 3258 | BUG_ON(!batchcount || batchcount > ac->avail); | 
|  | 3259 | #endif | 
|  | 3260 | check_irq_off(); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3261 | l3 = cachep->nodelists[node]; | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 3262 | spin_lock(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3263 | if (l3->shared) { | 
|  | 3264 | struct array_cache *shared_array = l3->shared; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3265 | int max = shared_array->limit - shared_array->avail; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3266 | if (max) { | 
|  | 3267 | if (batchcount > max) | 
|  | 3268 | batchcount = max; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3269 | memcpy(&(shared_array->entry[shared_array->avail]), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3270 | ac->entry, sizeof(void *) * batchcount); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3271 | shared_array->avail += batchcount; | 
|  | 3272 | goto free_done; | 
|  | 3273 | } | 
|  | 3274 | } | 
|  | 3275 |  | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3276 | free_block(cachep, ac->entry, batchcount, node); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3277 | free_done: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3278 | #if STATS | 
|  | 3279 | { | 
|  | 3280 | int i = 0; | 
|  | 3281 | struct list_head *p; | 
|  | 3282 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3283 | p = l3->slabs_free.next; | 
|  | 3284 | while (p != &(l3->slabs_free)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3285 | struct slab *slabp; | 
|  | 3286 |  | 
|  | 3287 | slabp = list_entry(p, struct slab, list); | 
|  | 3288 | BUG_ON(slabp->inuse); | 
|  | 3289 |  | 
|  | 3290 | i++; | 
|  | 3291 | p = p->next; | 
|  | 3292 | } | 
|  | 3293 | STATS_SET_FREEABLE(cachep, i); | 
|  | 3294 | } | 
|  | 3295 | #endif | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3296 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3297 | ac->avail -= batchcount; | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3298 | memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3299 | } | 
|  | 3300 |  | 
|  | 3301 | /* | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3302 | * Release an obj back to its cache. If the obj has a constructed state, it must | 
|  | 3303 | * be in this state _before_ it is released.  Called with disabled ints. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3304 | */ | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 3305 | static inline void __cache_free(struct kmem_cache *cachep, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3306 | { | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3307 | struct array_cache *ac = cpu_cache_get(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3308 |  | 
|  | 3309 | check_irq_off(); | 
|  | 3310 | objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); | 
|  | 3311 |  | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 3312 | if (cache_free_alien(cachep, objp)) | 
| Pekka Enberg | 729bd0b | 2006-06-23 02:03:05 -0700 | [diff] [blame] | 3313 | return; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3314 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3315 | if (likely(ac->avail < ac->limit)) { | 
|  | 3316 | STATS_INC_FREEHIT(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3317 | ac->entry[ac->avail++] = objp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3318 | return; | 
|  | 3319 | } else { | 
|  | 3320 | STATS_INC_FREEMISS(cachep); | 
|  | 3321 | cache_flusharray(cachep, ac); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3322 | ac->entry[ac->avail++] = objp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3323 | } | 
|  | 3324 | } | 
|  | 3325 |  | 
|  | 3326 | /** | 
|  | 3327 | * kmem_cache_alloc - Allocate an object | 
|  | 3328 | * @cachep: The cache to allocate from. | 
|  | 3329 | * @flags: See kmalloc(). | 
|  | 3330 | * | 
|  | 3331 | * Allocate an object from this cache.  The flags are only relevant | 
|  | 3332 | * if the cache has no available objects. | 
|  | 3333 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3334 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3335 | { | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3336 | return __cache_alloc(cachep, flags, __builtin_return_address(0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3337 | } | 
|  | 3338 | EXPORT_SYMBOL(kmem_cache_alloc); | 
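|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative usage sketch (not part of the original source; struct foo and | 
|  |  | * the cache name are made up): | 
|  |  | * | 
|  |  | *	static struct kmem_cache *foo_cache; | 
|  |  | * | 
|  |  | *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, | 
|  |  | *				      SLAB_HWCACHE_ALIGN, NULL, NULL); | 
|  |  | *	if (!foo_cache) | 
|  |  | *		return -ENOMEM; | 
|  |  | * | 
|  |  | *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL); | 
|  |  | *	if (!f) | 
|  |  | *		return -ENOMEM; | 
|  |  | *	... | 
|  |  | *	kmem_cache_free(foo_cache, f); | 
|  |  | */ | 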
|  | 3339 |  | 
|  | 3340 | /** | 
| Rolf Eike Beer | b8008b2 | 2006-07-30 03:04:04 -0700 | [diff] [blame] | 3341 | * kmem_cache_zalloc - Allocate an object. The memory is set to zero. | 
| Pekka Enberg | a8c0f9a | 2006-03-25 03:06:42 -0800 | [diff] [blame] | 3342 | * @cache: The cache to allocate from. | 
|  | 3343 | * @flags: See kmalloc(). | 
|  | 3344 | * | 
|  | 3345 | * Allocate an object from this cache and set the allocated memory to zero. | 
|  | 3346 | * The flags are only relevant if the cache has no available objects. | 
|  | 3347 | */ | 
|  | 3348 | void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) | 
|  | 3349 | { | 
|  | 3350 | void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); | 
|  | 3351 | if (ret) | 
|  | 3352 | memset(ret, 0, obj_size(cache)); | 
|  | 3353 | return ret; | 
|  | 3354 | } | 
|  | 3355 | EXPORT_SYMBOL(kmem_cache_zalloc); | 
|  | 3356 |  | 
|  | 3357 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3358 | * kmem_ptr_validate - check if an untrusted pointer might | 
|  | 3359 | *	be a slab entry. | 
|  | 3360 | * @cachep: the cache we're checking against | 
|  | 3361 | * @ptr: pointer to validate | 
|  | 3362 | * | 
|  | 3363 | * This verifies that the untrusted pointer looks sane: | 
|  | 3364 | * it is _not_ a guarantee that the pointer is actually | 
|  | 3365 | * part of the slab cache in question, but it at least | 
|  | 3366 | * validates that the pointer can be dereferenced and | 
|  | 3367 | * looks half-way sane. | 
|  | 3368 | * | 
|  | 3369 | * Currently only used for dentry validation. | 
|  | 3370 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3371 | int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3372 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3373 | unsigned long addr = (unsigned long)ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3374 | unsigned long min_addr = PAGE_OFFSET; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3375 | unsigned long align_mask = BYTES_PER_WORD - 1; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3376 | unsigned long size = cachep->buffer_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3377 | struct page *page; | 
|  | 3378 |  | 
|  | 3379 | if (unlikely(addr < min_addr)) | 
|  | 3380 | goto out; | 
|  | 3381 | if (unlikely(addr > (unsigned long)high_memory - size)) | 
|  | 3382 | goto out; | 
|  | 3383 | if (unlikely(addr & align_mask)) | 
|  | 3384 | goto out; | 
|  | 3385 | if (unlikely(!kern_addr_valid(addr))) | 
|  | 3386 | goto out; | 
|  | 3387 | if (unlikely(!kern_addr_valid(addr + size - 1))) | 
|  | 3388 | goto out; | 
|  | 3389 | page = virt_to_page(ptr); | 
|  | 3390 | if (unlikely(!PageSlab(page))) | 
|  | 3391 | goto out; | 
| Pekka Enberg | 065d41c | 2005-11-13 16:06:46 -0800 | [diff] [blame] | 3392 | if (unlikely(page_get_cache(page) != cachep)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3393 | goto out; | 
|  | 3394 | return 1; | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3395 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3396 | return 0; | 
|  | 3397 | } | 
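|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not part of the original source): a caller treats a | 
|  |  | * failed check as "this pointer must not be dereferenced", roughly as the | 
|  |  | * dcache does with its dentry_cache: | 
|  |  | * | 
|  |  | *	if (!kmem_ptr_validate(dentry_cache, dentry)) | 
|  |  | *		return NULL; | 
|  |  | */ | 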
|  | 3398 |  | 
|  | 3399 | #ifdef CONFIG_NUMA | 
|  | 3400 | /** | 
|  | 3401 | * kmem_cache_alloc_node - Allocate an object on the specified node | 
|  | 3402 | * @cachep: The cache to allocate from. | 
|  | 3403 | * @flags: See kmalloc(). | 
|  | 3404 | * @nodeid: node number of the target node. | 
|  | 3405 | * | 
|  | 3406 | * Identical to kmem_cache_alloc, except that this function is slow | 
|  | 3407 | * and can sleep. It allocates memory on the given node, which | 
|  | 3408 | * can improve performance for CPU-bound structures. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3409 | * New and improved: it will now make sure that the object gets | 
|  | 3410 | * put on the correct node list so that there is no false sharing. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3411 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3412 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3413 | { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3414 | unsigned long save_flags; | 
|  | 3415 | void *ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3416 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3417 | cache_alloc_debugcheck_before(cachep, flags); | 
|  | 3418 | local_irq_save(save_flags); | 
| Christoph Lameter | 18f820f | 2006-02-01 03:05:43 -0800 | [diff] [blame] | 3419 |  | 
|  | 3420 | if (nodeid == -1 || nodeid == numa_node_id() || | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3421 | !cachep->nodelists[nodeid]) | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 3422 | ptr = ____cache_alloc(cachep, flags); | 
|  | 3423 | else | 
|  | 3424 | ptr = __cache_alloc_node(cachep, flags, nodeid); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3425 | local_irq_restore(save_flags); | 
| Christoph Lameter | 18f820f | 2006-02-01 03:05:43 -0800 | [diff] [blame] | 3426 |  | 
|  | 3427 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, | 
|  | 3428 | __builtin_return_address(0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3429 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3430 | return ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3431 | } | 
|  | 3432 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 
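|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (made-up cache name, not from the original source): | 
|  |  | * allocate a per-cpu control structure on the node that cpu belongs to, so | 
|  |  | * later accesses stay node-local: | 
|  |  | * | 
|  |  | *	p = kmem_cache_alloc_node(my_cache, GFP_KERNEL, cpu_to_node(cpu)); | 
|  |  | */ | 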
|  | 3433 |  | 
| Christoph Hellwig | dbe5e69 | 2006-09-25 23:31:36 -0700 | [diff] [blame] | 3434 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 
| Manfred Spraul | 97e2bde | 2005-05-01 08:58:38 -0700 | [diff] [blame] | 3435 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3436 | struct kmem_cache *cachep; | 
| Manfred Spraul | 97e2bde | 2005-05-01 08:58:38 -0700 | [diff] [blame] | 3437 |  | 
|  | 3438 | cachep = kmem_find_general_cachep(size, flags); | 
|  | 3439 | if (unlikely(cachep == NULL)) | 
|  | 3440 | return NULL; | 
|  | 3441 | return kmem_cache_alloc_node(cachep, flags, node); | 
|  | 3442 | } | 
| Christoph Hellwig | dbe5e69 | 2006-09-25 23:31:36 -0700 | [diff] [blame] | 3443 | EXPORT_SYMBOL(__kmalloc_node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3444 | #endif | 
|  | 3445 |  | 
|  | 3446 | /** | 
| Paul Drynoff | 800590f | 2006-06-23 02:03:48 -0700 | [diff] [blame] | 3447 | * __do_kmalloc - allocate memory | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3448 | * @size: how many bytes of memory are required. | 
| Paul Drynoff | 800590f | 2006-06-23 02:03:48 -0700 | [diff] [blame] | 3449 | * @flags: the type of memory to allocate (see kmalloc). | 
| Randy Dunlap | 911851e | 2006-03-22 00:08:14 -0800 | [diff] [blame] | 3450 | * @caller: function caller for debug tracking of the caller | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3451 | */ | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3452 | static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | 
|  | 3453 | void *caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3454 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3455 | struct kmem_cache *cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3456 |  | 
| Manfred Spraul | 97e2bde | 2005-05-01 08:58:38 -0700 | [diff] [blame] | 3457 | /* If you want to save a few bytes of .text space: replace | 
|  | 3458 | * __ with kmem_. | 
|  | 3459 | * Then kmalloc uses the uninlined functions instead of the inline | 
|  | 3460 | * functions. | 
|  | 3461 | */ | 
|  | 3462 | cachep = __find_general_cachep(size, flags); | 
| Andrew Morton | dbdb904 | 2005-09-23 13:24:10 -0700 | [diff] [blame] | 3463 | if (unlikely(cachep == NULL)) | 
|  | 3464 | return NULL; | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3465 | return __cache_alloc(cachep, flags, caller); | 
|  | 3466 | } | 
|  | 3467 |  | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3468 |  | 
|  | 3469 | void *__kmalloc(size_t size, gfp_t flags) | 
|  | 3470 | { | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 3471 | #ifndef CONFIG_DEBUG_SLAB | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3472 | return __do_kmalloc(size, flags, NULL); | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 3473 | #else | 
|  | 3474 | return __do_kmalloc(size, flags, __builtin_return_address(0)); | 
|  | 3475 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3476 | } | 
|  | 3477 | EXPORT_SYMBOL(__kmalloc); | 
|  | 3478 |  | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 3479 | #ifdef CONFIG_DEBUG_SLAB | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3480 | void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) | 
|  | 3481 | { | 
|  | 3482 | return __do_kmalloc(size, flags, caller); | 
|  | 3483 | } | 
|  | 3484 | EXPORT_SYMBOL(__kmalloc_track_caller); | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3485 | #endif | 
|  | 3486 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3487 | /** | 
|  | 3488 | * kmem_cache_free - Deallocate an object | 
|  | 3489 | * @cachep: The cache the allocation was from. | 
|  | 3490 | * @objp: The previously allocated object. | 
|  | 3491 | * | 
|  | 3492 | * Free an object which was previously allocated from this | 
|  | 3493 | * cache. | 
|  | 3494 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3495 | void kmem_cache_free(struct kmem_cache *cachep, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3496 | { | 
|  | 3497 | unsigned long flags; | 
|  | 3498 |  | 
| Pekka Enberg | ddc2e81 | 2006-06-23 02:03:40 -0700 | [diff] [blame] | 3499 | BUG_ON(virt_to_cache(objp) != cachep); | 
|  | 3500 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3501 | local_irq_save(flags); | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 3502 | __cache_free(cachep, objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3503 | local_irq_restore(flags); | 
|  | 3504 | } | 
|  | 3505 | EXPORT_SYMBOL(kmem_cache_free); | 
|  | 3506 |  | 
|  | 3507 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3508 | * kfree - free previously allocated memory | 
|  | 3509 | * @objp: pointer returned by kmalloc. | 
|  | 3510 | * | 
| Pekka Enberg | 80e93ef | 2005-09-09 13:10:16 -0700 | [diff] [blame] | 3511 | * If @objp is NULL, no operation is performed. | 
|  | 3512 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3513 | * Don't free memory not originally allocated by kmalloc() | 
|  | 3514 | * or you will run into trouble. | 
|  | 3515 | */ | 
|  | 3516 | void kfree(const void *objp) | 
|  | 3517 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3518 | struct kmem_cache *c; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3519 | unsigned long flags; | 
|  | 3520 |  | 
|  | 3521 | if (unlikely(!objp)) | 
|  | 3522 | return; | 
|  | 3523 | local_irq_save(flags); | 
|  | 3524 | kfree_debugcheck(objp); | 
| Pekka Enberg | 6ed5eb2 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3525 | c = virt_to_cache(objp); | 
| Ingo Molnar | f9b8404 | 2006-06-27 02:54:49 -0700 | [diff] [blame] | 3526 | debug_check_no_locks_freed(objp, obj_size(c)); | 
| Ingo Molnar | 873623d | 2006-07-13 14:44:38 +0200 | [diff] [blame] | 3527 | __cache_free(c, (void *)objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3528 | local_irq_restore(flags); | 
|  | 3529 | } | 
|  | 3530 | EXPORT_SYMBOL(kfree); | 
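/*
 * Usage sketch (illustrative, not part of slab.c): kfree() pairs with
 * kmalloc()/kzalloc() and, as documented above, is a no-op for NULL,
 * so unconditional frees on error paths are safe.
 */
static int example_kmalloc_user(size_t len)
{
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... fill and use buf ... */
	kfree(buf);
	return 0;
}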
|  | 3531 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3532 | unsigned int kmem_cache_size(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3533 | { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3534 | return obj_size(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3535 | } | 
|  | 3536 | EXPORT_SYMBOL(kmem_cache_size); | 
|  | 3537 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3538 | const char *kmem_cache_name(struct kmem_cache *cachep) | 
| Arnaldo Carvalho de Melo | 1944972 | 2005-06-18 22:46:19 -0700 | [diff] [blame] | 3539 | { | 
|  | 3540 | return cachep->name; | 
|  | 3541 | } | 
|  | 3542 | EXPORT_SYMBOL_GPL(kmem_cache_name); | 
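/*
 * Illustrative sketch (not part of slab.c): the two accessors above are
 * handy for diagnostics that must not peek into struct kmem_cache, e.g.:
 */
static void example_report_cache(struct kmem_cache *cachep)
{
	printk(KERN_INFO "cache %s: %u bytes per object\n",
	       kmem_cache_name(cachep), kmem_cache_size(cachep));
}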
|  | 3543 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3544 | /* | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3545 | * This initializes kmem_list3 or resizes various caches for all nodes. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3546 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3547 | static int alloc_kmemlist(struct kmem_cache *cachep) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3548 | { | 
|  | 3549 | int node; | 
|  | 3550 | struct kmem_list3 *l3; | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3551 | struct array_cache *new_shared; | 
|  | 3552 | struct array_cache **new_alien; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3553 |  | 
|  | 3554 | for_each_online_node(node) { | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3555 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3556 | new_alien = alloc_alien_cache(node, cachep->limit); | 
|  | 3557 | if (!new_alien) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3558 | goto fail; | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3559 |  | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3560 | new_shared = alloc_arraycache(node, | 
|  | 3561 | cachep->shared*cachep->batchcount, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3562 | 0xbaadf00d); | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3563 | if (!new_shared) { | 
|  | 3564 | free_alien_cache(new_alien); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3565 | goto fail; | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3566 | } | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3567 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3568 | l3 = cachep->nodelists[node]; | 
|  | 3569 | if (l3) { | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3570 | struct array_cache *shared = l3->shared; | 
|  | 3571 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3572 | spin_lock_irq(&l3->list_lock); | 
|  | 3573 |  | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3574 | if (shared) | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3575 | free_block(cachep, shared->entry, | 
|  | 3576 | shared->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3577 |  | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3578 | l3->shared = new_shared; | 
|  | 3579 | if (!l3->alien) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3580 | l3->alien = new_alien; | 
|  | 3581 | new_alien = NULL; | 
|  | 3582 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3583 | l3->free_limit = (1 + nr_cpus_node(node)) * | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3584 | cachep->batchcount + cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3585 | spin_unlock_irq(&l3->list_lock); | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3586 | kfree(shared); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3587 | free_alien_cache(new_alien); | 
|  | 3588 | continue; | 
|  | 3589 | } | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3590 | l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3591 | if (!l3) { | 
|  | 3592 | free_alien_cache(new_alien); | 
|  | 3593 | kfree(new_shared); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3594 | goto fail; | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3595 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3596 |  | 
|  | 3597 | kmem_list3_init(l3); | 
|  | 3598 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3599 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3600 | l3->shared = new_shared; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3601 | l3->alien = new_alien; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3602 | l3->free_limit = (1 + nr_cpus_node(node)) * | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3603 | cachep->batchcount + cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3604 | cachep->nodelists[node] = l3; | 
|  | 3605 | } | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3606 | return 0; | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3607 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3608 | fail: | 
| Christoph Lameter | 0718dc2 | 2006-03-25 03:06:47 -0800 | [diff] [blame] | 3609 | if (!cachep->next.next) { | 
|  | 3610 | /* Cache is not active yet. Roll back what we did */ | 
|  | 3611 | node--; | 
|  | 3612 | while (node >= 0) { | 
|  | 3613 | if (cachep->nodelists[node]) { | 
|  | 3614 | l3 = cachep->nodelists[node]; | 
|  | 3615 |  | 
|  | 3616 | kfree(l3->shared); | 
|  | 3617 | free_alien_cache(l3->alien); | 
|  | 3618 | kfree(l3); | 
|  | 3619 | cachep->nodelists[node] = NULL; | 
|  | 3620 | } | 
|  | 3621 | node--; | 
|  | 3622 | } | 
|  | 3623 | } | 
| Christoph Lameter | cafeb02 | 2006-03-25 03:06:46 -0800 | [diff] [blame] | 3624 | return -ENOMEM; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3625 | } | 
|  | 3626 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3627 | struct ccupdate_struct { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3628 | struct kmem_cache *cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3629 | struct array_cache *new[NR_CPUS]; | 
|  | 3630 | }; | 
|  | 3631 |  | 
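/*
 * Runs on every CPU via on_each_cpu() from do_tune_cpucache() below:
 * each CPU installs its new array_cache and hands the old one back
 * through new->new[smp_processor_id()] so the caller can drain and
 * free it afterwards.
 */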
|  | 3632 | static void do_ccupdate_local(void *info) | 
|  | 3633 | { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3634 | struct ccupdate_struct *new = info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3635 | struct array_cache *old; | 
|  | 3636 |  | 
|  | 3637 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3638 | old = cpu_cache_get(new->cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3639 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3640 | new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; | 
|  | 3641 | new->new[smp_processor_id()] = old; | 
|  | 3642 | } | 
|  | 3643 |  | 
| Ravikiran G Thirumalai | b5d8ca7 | 2006-03-22 00:08:12 -0800 | [diff] [blame] | 3644 | /* Always called with the cache_chain_mutex held */ | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3645 | static int do_tune_cpucache(struct kmem_cache *cachep, int limit, | 
|  | 3646 | int batchcount, int shared) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3647 | { | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3648 | struct ccupdate_struct *new; | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 3649 | int i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3650 |  | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3651 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 
|  | 3652 | if (!new) | 
|  | 3653 | return -ENOMEM; | 
|  | 3654 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3655 | for_each_online_cpu(i) { | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3656 | new->new[i] = alloc_arraycache(cpu_to_node(i), limit, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3657 | batchcount); | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3658 | if (!new->new[i]) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3659 | for (i--; i >= 0; i--) | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3660 | kfree(new->new[i]); | 
|  | 3661 | kfree(new); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3662 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3663 | } | 
|  | 3664 | } | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3665 | new->cachep = cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3666 |  | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3667 | on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3668 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3669 | check_irq_on(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3670 | cachep->batchcount = batchcount; | 
|  | 3671 | cachep->limit = limit; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3672 | cachep->shared = shared; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3673 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3674 | for_each_online_cpu(i) { | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3675 | struct array_cache *ccold = new->new[i]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3676 | if (!ccold) | 
|  | 3677 | continue; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3678 | spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3679 | free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3680 | spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3681 | kfree(ccold); | 
|  | 3682 | } | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 3683 | kfree(new); | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 3684 | return alloc_kmemlist(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3685 | } | 
|  | 3686 |  | 
| Ravikiran G Thirumalai | b5d8ca7 | 2006-03-22 00:08:12 -0800 | [diff] [blame] | 3687 | /* Called with cache_chain_mutex held always */ | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 3688 | static int enable_cpucache(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3689 | { | 
|  | 3690 | int err; | 
|  | 3691 | int limit, shared; | 
|  | 3692 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3693 | /* | 
|  | 3694 | * The head array serves three purposes: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3695 | * - create a LIFO ordering, i.e. return objects that are cache-warm | 
|  | 3696 | * - reduce the number of spinlock operations. | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3697 | * - reduce the number of linked list operations on the slab and | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3698 | *   bufctl chains: array operations are cheaper. | 
|  | 3699 | * The numbers are guessed, we should auto-tune as described by | 
|  | 3700 | * Bonwick. | 
|  | 3701 | */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3702 | if (cachep->buffer_size > 131072) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3703 | limit = 1; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3704 | else if (cachep->buffer_size > PAGE_SIZE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3705 | limit = 8; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3706 | else if (cachep->buffer_size > 1024) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3707 | limit = 24; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3708 | else if (cachep->buffer_size > 256) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3709 | limit = 54; | 
|  | 3710 | else | 
|  | 3711 | limit = 120; | 
|  | 3712 |  | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3713 | /* | 
|  | 3714 | * CPU bound tasks (e.g. network routing) can exhibit cpu bound | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3715 | * allocation behaviour: Most allocs on one cpu, most free operations | 
|  | 3716 | * on another cpu. For these cases, an efficient object passing between | 
|  | 3717 | * cpus is necessary. This is provided by a shared array. The array | 
|  | 3718 | * replaces Bonwick's magazine layer. | 
|  | 3719 | * On uniprocessor, it's functionally equivalent (but less efficient) | 
|  | 3720 | * to a larger limit. Thus disabled by default. | 
|  | 3721 | */ | 
|  | 3722 | shared = 0; | 
|  | 3723 | #ifdef CONFIG_SMP | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3724 | if (cachep->buffer_size <= PAGE_SIZE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3725 | shared = 8; | 
|  | 3726 | #endif | 
|  | 3727 |  | 
|  | 3728 | #if DEBUG | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3729 | /* | 
|  | 3730 | * With debugging enabled, a large batchcount leads to excessively long | 
|  | 3731 | * periods with local interrupts disabled. Limit the batchcount. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3732 | */ | 
|  | 3733 | if (limit > 32) | 
|  | 3734 | limit = 32; | 
|  | 3735 | #endif | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3736 | err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3737 | if (err) | 
|  | 3738 | printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3739 | cachep->name, -err); | 
| Christoph Lameter | 2ed3a4e | 2006-09-25 23:31:38 -0700 | [diff] [blame] | 3740 | return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3741 | } | 
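/*
 * Worked example of the heuristic above (illustrative only): a cache
 * with buffer_size == 512 gets limit = 54, a batchcount of
 * (54 + 1) / 2 = 27, and, on SMP, shared = 8 since 512 <= PAGE_SIZE.
 */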
|  | 3742 |  | 
| Christoph Lameter | 1b55253 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3743 | /* | 
|  | 3744 | * Drain an array if it contains any elements, taking the l3 lock only if | 
| Christoph Lameter | b18e7e6 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3745 | * necessary. Note that the l3 listlock also protects the array_cache | 
|  | 3746 | * if drain_array() is used on the shared array. | 
| Christoph Lameter | 1b55253 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3747 | */ | 
|  | 3748 | void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | 
|  | 3749 | struct array_cache *ac, int force, int node) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3750 | { | 
|  | 3751 | int tofree; | 
|  | 3752 |  | 
| Christoph Lameter | 1b55253 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3753 | if (!ac || !ac->avail) | 
|  | 3754 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3755 | if (ac->touched && !force) { | 
|  | 3756 | ac->touched = 0; | 
| Christoph Lameter | b18e7e6 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3757 | } else { | 
| Christoph Lameter | 1b55253 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3758 | spin_lock_irq(&l3->list_lock); | 
| Christoph Lameter | b18e7e6 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3759 | if (ac->avail) { | 
|  | 3760 | tofree = force ? ac->avail : (ac->limit + 4) / 5; | 
|  | 3761 | if (tofree > ac->avail) | 
|  | 3762 | tofree = (ac->avail + 1) / 2; | 
|  | 3763 | free_block(cachep, ac->entry, tofree, node); | 
|  | 3764 | ac->avail -= tofree; | 
|  | 3765 | memmove(ac->entry, &(ac->entry[tofree]), | 
|  | 3766 | sizeof(void *) * ac->avail); | 
|  | 3767 | } | 
| Christoph Lameter | 1b55253 | 2006-03-22 00:09:07 -0800 | [diff] [blame] | 3768 | spin_unlock_irq(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3769 | } | 
|  | 3770 | } | 
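/*
 * Worked example of the freeing heuristic above (illustrative only):
 * with ac->limit == 120 and ac->avail == 30, a non-forced drain frees
 * tofree = (120 + 4) / 5 = 24 objects and leaves 6; had only 10 been
 * available, tofree would have been clamped to (10 + 1) / 2 = 5.
 */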
|  | 3771 |  | 
|  | 3772 | /** | 
|  | 3773 | * cache_reap - Reclaim memory from caches. | 
| Randy Dunlap | 1e5d533 | 2005-11-07 01:01:06 -0800 | [diff] [blame] | 3774 | * @unused: unused parameter | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3775 | * | 
|  | 3776 | * Called from workqueue/eventd every few seconds. | 
|  | 3777 | * Purpose: | 
|  | 3778 | * - clear the per-cpu caches for this CPU. | 
|  | 3779 | * - return freeable pages to the main free memory pool. | 
|  | 3780 | * | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3781 | * If we cannot acquire the cache chain mutex then just give up - we'll try | 
|  | 3782 | * again on the next iteration. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3783 | */ | 
|  | 3784 | static void cache_reap(void *unused) | 
|  | 3785 | { | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 3786 | struct kmem_cache *searchp; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3787 | struct kmem_list3 *l3; | 
| Christoph Lameter | aab2207 | 2006-03-22 00:09:06 -0800 | [diff] [blame] | 3788 | int node = numa_node_id(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3789 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3790 | if (!mutex_trylock(&cache_chain_mutex)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3791 | /* Give up. Setup the next iteration. */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3792 | schedule_delayed_work(&__get_cpu_var(reap_work), | 
|  | 3793 | REAPTIMEOUT_CPUC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3794 | return; | 
|  | 3795 | } | 
|  | 3796 |  | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 3797 | list_for_each_entry(searchp, &cache_chain, next) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3798 | check_irq_on(); | 
|  | 3799 |  | 
| Christoph Lameter | 35386e3 | 2006-03-22 00:09:05 -0800 | [diff] [blame] | 3800 | /* | 
|  | 3801 | * We only take the l3 lock if absolutely necessary and we | 
|  | 3802 | * have established with reasonable certainty that | 
|  | 3803 | * we can do some work once the lock is obtained. | 
|  | 3804 | */ | 
| Christoph Lameter | aab2207 | 2006-03-22 00:09:06 -0800 | [diff] [blame] | 3805 | l3 = searchp->nodelists[node]; | 
| Christoph Lameter | 35386e3 | 2006-03-22 00:09:05 -0800 | [diff] [blame] | 3806 |  | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 3807 | reap_alien(searchp, l3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3808 |  | 
| Christoph Lameter | aab2207 | 2006-03-22 00:09:06 -0800 | [diff] [blame] | 3809 | drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3810 |  | 
| Christoph Lameter | 35386e3 | 2006-03-22 00:09:05 -0800 | [diff] [blame] | 3811 | /* | 
|  | 3812 | * These are racy checks but it does not matter | 
|  | 3813 | * if we skip one check or scan twice. | 
|  | 3814 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3815 | if (time_after(l3->next_reap, jiffies)) | 
| Christoph Lameter | 35386e3 | 2006-03-22 00:09:05 -0800 | [diff] [blame] | 3816 | goto next; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3817 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3818 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3819 |  | 
| Christoph Lameter | aab2207 | 2006-03-22 00:09:06 -0800 | [diff] [blame] | 3820 | drain_array(searchp, l3, l3->shared, 0, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3821 |  | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 3822 | if (l3->free_touched) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3823 | l3->free_touched = 0; | 
| Christoph Lameter | ed11d9e | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 3824 | else { | 
|  | 3825 | int freed; | 
|  | 3826 |  | 
|  | 3827 | freed = drain_freelist(searchp, l3, (l3->free_limit + | 
|  | 3828 | 5 * searchp->num - 1) / (5 * searchp->num)); | 
|  | 3829 | STATS_ADD_REAPED(searchp, freed); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3830 | } | 
| Christoph Lameter | 35386e3 | 2006-03-22 00:09:05 -0800 | [diff] [blame] | 3831 | next: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3832 | cond_resched(); | 
|  | 3833 | } | 
|  | 3834 | check_irq_on(); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3835 | mutex_unlock(&cache_chain_mutex); | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 3836 | next_reap_node(); | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 3837 | refresh_cpu_vm_stats(smp_processor_id()); | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3838 | /* Set up the next iteration */ | 
| Manfred Spraul | cd61ef6 | 2005-11-07 00:58:02 -0800 | [diff] [blame] | 3839 | schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3840 | } | 
|  | 3841 |  | 
|  | 3842 | #ifdef CONFIG_PROC_FS | 
|  | 3843 |  | 
| Pekka Enberg | 85289f9 | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 3844 | static void print_slabinfo_header(struct seq_file *m) | 
|  | 3845 | { | 
|  | 3846 | /* | 
|  | 3847 | * Output format version, so at least we can change it | 
|  | 3848 | * without _too_ many complaints. | 
|  | 3849 | */ | 
|  | 3850 | #if STATS | 
|  | 3851 | seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); | 
|  | 3852 | #else | 
|  | 3853 | seq_puts(m, "slabinfo - version: 2.1\n"); | 
|  | 3854 | #endif | 
|  | 3855 | seq_puts(m, "# name            <active_objs> <num_objs> <objsize> " | 
|  | 3856 | "<objperslab> <pagesperslab>"); | 
|  | 3857 | seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); | 
|  | 3858 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); | 
|  | 3859 | #if STATS | 
|  | 3860 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " | 
| Ravikiran G Thirumalai | fb7faf3 | 2006-04-10 22:52:54 -0700 | [diff] [blame] | 3861 | "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); | 
| Pekka Enberg | 85289f9 | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 3862 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); | 
|  | 3863 | #endif | 
|  | 3864 | seq_putc(m, '\n'); | 
|  | 3865 | } | 
|  | 3866 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3867 | static void *s_start(struct seq_file *m, loff_t *pos) | 
|  | 3868 | { | 
|  | 3869 | loff_t n = *pos; | 
|  | 3870 | struct list_head *p; | 
|  | 3871 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3872 | mutex_lock(&cache_chain_mutex); | 
| Pekka Enberg | 85289f9 | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 3873 | if (!n) | 
|  | 3874 | print_slabinfo_header(m); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3875 | p = cache_chain.next; | 
|  | 3876 | while (n--) { | 
|  | 3877 | p = p->next; | 
|  | 3878 | if (p == &cache_chain) | 
|  | 3879 | return NULL; | 
|  | 3880 | } | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3881 | return list_entry(p, struct kmem_cache, next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3882 | } | 
|  | 3883 |  | 
|  | 3884 | static void *s_next(struct seq_file *m, void *p, loff_t *pos) | 
|  | 3885 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3886 | struct kmem_cache *cachep = p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3887 | ++*pos; | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3888 | return cachep->next.next == &cache_chain ? | 
|  | 3889 | NULL : list_entry(cachep->next.next, struct kmem_cache, next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3890 | } | 
|  | 3891 |  | 
|  | 3892 | static void s_stop(struct seq_file *m, void *p) | 
|  | 3893 | { | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3894 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3895 | } | 
|  | 3896 |  | 
|  | 3897 | static int s_show(struct seq_file *m, void *p) | 
|  | 3898 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3899 | struct kmem_cache *cachep = p; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3900 | struct slab *slabp; | 
|  | 3901 | unsigned long active_objs; | 
|  | 3902 | unsigned long num_objs; | 
|  | 3903 | unsigned long active_slabs = 0; | 
|  | 3904 | unsigned long num_slabs, free_objects = 0, shared_avail = 0; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3905 | const char *name; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3906 | char *error = NULL; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3907 | int node; | 
|  | 3908 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3909 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3910 | active_objs = 0; | 
|  | 3911 | num_slabs = 0; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3912 | for_each_online_node(node) { | 
|  | 3913 | l3 = cachep->nodelists[node]; | 
|  | 3914 | if (!l3) | 
|  | 3915 | continue; | 
|  | 3916 |  | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 3917 | check_irq_on(); | 
|  | 3918 | spin_lock_irq(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3919 |  | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 3920 | list_for_each_entry(slabp, &l3->slabs_full, list) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3921 | if (slabp->inuse != cachep->num && !error) | 
|  | 3922 | error = "slabs_full accounting error"; | 
|  | 3923 | active_objs += cachep->num; | 
|  | 3924 | active_slabs++; | 
|  | 3925 | } | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 3926 | list_for_each_entry(slabp, &l3->slabs_partial, list) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3927 | if (slabp->inuse == cachep->num && !error) | 
|  | 3928 | error = "slabs_partial inuse accounting error"; | 
|  | 3929 | if (!slabp->inuse && !error) | 
|  | 3930 | error = "slabs_partial/inuse accounting error"; | 
|  | 3931 | active_objs += slabp->inuse; | 
|  | 3932 | active_slabs++; | 
|  | 3933 | } | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 3934 | list_for_each_entry(slabp, &l3->slabs_free, list) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3935 | if (slabp->inuse && !error) | 
|  | 3936 | error = "slabs_free/inuse accounting error"; | 
|  | 3937 | num_slabs++; | 
|  | 3938 | } | 
|  | 3939 | free_objects += l3->free_objects; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 3940 | if (l3->shared) | 
|  | 3941 | shared_avail += l3->shared->avail; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3942 |  | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 3943 | spin_unlock_irq(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3944 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3945 | num_slabs += active_slabs; | 
|  | 3946 | num_objs = num_slabs * cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3947 | if (num_objs - active_objs != free_objects && !error) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3948 | error = "free_objects accounting error"; | 
|  | 3949 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3950 | name = cachep->name; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3951 | if (error) | 
|  | 3952 | printk(KERN_ERR "slab: cache %s error: %s\n", name, error); | 
|  | 3953 |  | 
|  | 3954 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3955 | name, active_objs, num_objs, cachep->buffer_size, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3956 | cachep->num, (1 << cachep->gfporder)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3957 | seq_printf(m, " : tunables %4u %4u %4u", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3958 | cachep->limit, cachep->batchcount, cachep->shared); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3959 | seq_printf(m, " : slabdata %6lu %6lu %6lu", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3960 | active_slabs, num_slabs, shared_avail); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3961 | #if STATS | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3962 | {			/* list3 stats */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3963 | unsigned long high = cachep->high_mark; | 
|  | 3964 | unsigned long allocs = cachep->num_allocations; | 
|  | 3965 | unsigned long grown = cachep->grown; | 
|  | 3966 | unsigned long reaped = cachep->reaped; | 
|  | 3967 | unsigned long errors = cachep->errors; | 
|  | 3968 | unsigned long max_freeable = cachep->max_freeable; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3969 | unsigned long node_allocs = cachep->node_allocs; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3970 | unsigned long node_frees = cachep->node_frees; | 
| Ravikiran G Thirumalai | fb7faf3 | 2006-04-10 22:52:54 -0700 | [diff] [blame] | 3971 | unsigned long overflows = cachep->node_overflow; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3972 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3973 | seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ | 
| Ravikiran G Thirumalai | fb7faf3 | 2006-04-10 22:52:54 -0700 | [diff] [blame] | 3974 | %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 3975 | reaped, errors, max_freeable, node_allocs, | 
| Ravikiran G Thirumalai | fb7faf3 | 2006-04-10 22:52:54 -0700 | [diff] [blame] | 3976 | node_frees, overflows); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3977 | } | 
|  | 3978 | /* cpu stats */ | 
|  | 3979 | { | 
|  | 3980 | unsigned long allochit = atomic_read(&cachep->allochit); | 
|  | 3981 | unsigned long allocmiss = atomic_read(&cachep->allocmiss); | 
|  | 3982 | unsigned long freehit = atomic_read(&cachep->freehit); | 
|  | 3983 | unsigned long freemiss = atomic_read(&cachep->freemiss); | 
|  | 3984 |  | 
|  | 3985 | seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3986 | allochit, allocmiss, freehit, freemiss); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3987 | } | 
|  | 3988 | #endif | 
|  | 3989 | seq_putc(m, '\n'); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3990 | return 0; | 
|  | 3991 | } | 
|  | 3992 |  | 
|  | 3993 | /* | 
|  | 3994 | * slabinfo_op - iterator that generates /proc/slabinfo | 
|  | 3995 | * | 
|  | 3996 | * Output layout: | 
|  | 3997 | * cache-name | 
|  | 3998 | * num-active-objs | 
|  | 3999 | * total-objs | 
|  | 4000 | * object size | 
|  | 4001 | * num-active-slabs | 
|  | 4002 | * total-slabs | 
|  | 4003 | * num-pages-per-slab | 
|  | 4004 | * + further values on SMP and with statistics enabled | 
|  | 4005 | */ | 
|  | 4006 |  | 
|  | 4007 | struct seq_operations slabinfo_op = { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 4008 | .start = s_start, | 
|  | 4009 | .next = s_next, | 
|  | 4010 | .stop = s_stop, | 
|  | 4011 | .show = s_show, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4012 | }; | 
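/*
 * Sketch of how a seq_file consumer wires this iterator up (roughly what
 * fs/proc does for /proc/slabinfo; shown only for illustration, the
 * names here are made up):
 */
static int example_slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static struct file_operations example_slabinfo_fops = {
	.open		= example_slabinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};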
|  | 4013 |  | 
|  | 4014 | #define MAX_SLABINFO_WRITE 128 | 
|  | 4015 | /** | 
|  | 4016 | * slabinfo_write - Tuning for the slab allocator | 
|  | 4017 | * @file: unused | 
|  | 4018 | * @buffer: user buffer | 
|  | 4019 | * @count: data length | 
|  | 4020 | * @ppos: unused | 
|  | 4021 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 4022 | ssize_t slabinfo_write(struct file *file, const char __user * buffer, | 
|  | 4023 | size_t count, loff_t *ppos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4024 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 4025 | char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4026 | int limit, batchcount, shared, res; | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 4027 | struct kmem_cache *cachep; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 4028 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4029 | if (count > MAX_SLABINFO_WRITE) | 
|  | 4030 | return -EINVAL; | 
|  | 4031 | if (copy_from_user(&kbuf, buffer, count)) | 
|  | 4032 | return -EFAULT; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 4033 | kbuf[MAX_SLABINFO_WRITE] = '\0'; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4034 |  | 
|  | 4035 | tmp = strchr(kbuf, ' '); | 
|  | 4036 | if (!tmp) | 
|  | 4037 | return -EINVAL; | 
|  | 4038 | *tmp = '\0'; | 
|  | 4039 | tmp++; | 
|  | 4040 | if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) | 
|  | 4041 | return -EINVAL; | 
|  | 4042 |  | 
|  | 4043 | /* Find the cache in the chain of caches. */ | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 4044 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4045 | res = -EINVAL; | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 4046 | list_for_each_entry(cachep, &cache_chain, next) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4047 | if (!strcmp(cachep->name, kbuf)) { | 
| Andrew Morton | a737b3e | 2006-03-22 00:08:11 -0800 | [diff] [blame] | 4048 | if (limit < 1 || batchcount < 1 || | 
|  | 4049 | batchcount > limit || shared < 0) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 4050 | res = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4051 | } else { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 4052 | res = do_tune_cpucache(cachep, limit, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 4053 | batchcount, shared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4054 | } | 
|  | 4055 | break; | 
|  | 4056 | } | 
|  | 4057 | } | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 4058 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4059 | if (res >= 0) | 
|  | 4060 | res = count; | 
|  | 4061 | return res; | 
|  | 4062 | } | 
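/*
 * Illustrative user-space sketch (not kernel code): tuning is done by
 * writing "<cache-name> <limit> <batchcount> <shared>" to /proc/slabinfo,
 * which slabinfo_write() above parses.  Cache name and values are
 * examples only.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f)
		return 1;
	fprintf(f, "dentry_cache 120 60 8\n");
	return fclose(f) ? 1 : 0;
}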
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 4063 |  | 
|  | 4064 | #ifdef CONFIG_DEBUG_SLAB_LEAK | 
|  | 4065 |  | 
|  | 4066 | static void *leaks_start(struct seq_file *m, loff_t *pos) | 
|  | 4067 | { | 
|  | 4068 | loff_t n = *pos; | 
|  | 4069 | struct list_head *p; | 
|  | 4070 |  | 
|  | 4071 | mutex_lock(&cache_chain_mutex); | 
|  | 4072 | p = cache_chain.next; | 
|  | 4073 | while (n--) { | 
|  | 4074 | p = p->next; | 
|  | 4075 | if (p == &cache_chain) | 
|  | 4076 | return NULL; | 
|  | 4077 | } | 
|  | 4078 | return list_entry(p, struct kmem_cache, next); | 
|  | 4079 | } | 
|  | 4080 |  | 
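/*
 * The caller table 'n' is laid out as: n[0] = capacity in entries,
 * n[1] = entries in use, followed by n[1] {caller address, hit count}
 * pairs kept sorted by address.  add_caller() binary-searches for v,
 * bumps the count on a match, otherwise inserts a new pair in order;
 * it returns 0 once the table runs out of room so that leaks_show()
 * can enlarge the buffer and retry.
 */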
|  | 4081 | static inline int add_caller(unsigned long *n, unsigned long v) | 
|  | 4082 | { | 
|  | 4083 | unsigned long *p; | 
|  | 4084 | int l; | 
|  | 4085 | if (!v) | 
|  | 4086 | return 1; | 
|  | 4087 | l = n[1]; | 
|  | 4088 | p = n + 2; | 
|  | 4089 | while (l) { | 
|  | 4090 | int i = l/2; | 
|  | 4091 | unsigned long *q = p + 2 * i; | 
|  | 4092 | if (*q == v) { | 
|  | 4093 | q[1]++; | 
|  | 4094 | return 1; | 
|  | 4095 | } | 
|  | 4096 | if (*q > v) { | 
|  | 4097 | l = i; | 
|  | 4098 | } else { | 
|  | 4099 | p = q + 2; | 
|  | 4100 | l -= i + 1; | 
|  | 4101 | } | 
|  | 4102 | } | 
|  | 4103 | if (++n[1] == n[0]) | 
|  | 4104 | return 0; | 
|  | 4105 | memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); | 
|  | 4106 | p[0] = v; | 
|  | 4107 | p[1] = 1; | 
|  | 4108 | return 1; | 
|  | 4109 | } | 
|  | 4110 |  | 
|  | 4111 | static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) | 
|  | 4112 | { | 
|  | 4113 | void *p; | 
|  | 4114 | int i; | 
|  | 4115 | if (n[0] == n[1]) | 
|  | 4116 | return; | 
|  | 4117 | for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { | 
|  | 4118 | if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) | 
|  | 4119 | continue; | 
|  | 4120 | if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) | 
|  | 4121 | return; | 
|  | 4122 | } | 
|  | 4123 | } | 
|  | 4124 |  | 
|  | 4125 | static void show_symbol(struct seq_file *m, unsigned long address) | 
|  | 4126 | { | 
|  | 4127 | #ifdef CONFIG_KALLSYMS | 
|  | 4128 | char *modname; | 
|  | 4129 | const char *name; | 
|  | 4130 | unsigned long offset, size; | 
|  | 4131 | char namebuf[KSYM_NAME_LEN+1]; | 
|  | 4132 |  | 
|  | 4133 | name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); | 
|  | 4134 |  | 
|  | 4135 | if (name) { | 
|  | 4136 | seq_printf(m, "%s+%#lx/%#lx", name, offset, size); | 
|  | 4137 | if (modname) | 
|  | 4138 | seq_printf(m, " [%s]", modname); | 
|  | 4139 | return; | 
|  | 4140 | } | 
|  | 4141 | #endif | 
|  | 4142 | seq_printf(m, "%p", (void *)address); | 
|  | 4143 | } | 
|  | 4144 |  | 
|  | 4145 | static int leaks_show(struct seq_file *m, void *p) | 
|  | 4146 | { | 
|  | 4147 | struct kmem_cache *cachep = p; | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 4148 | struct slab *slabp; | 
|  | 4149 | struct kmem_list3 *l3; | 
|  | 4150 | const char *name; | 
|  | 4151 | unsigned long *n = m->private; | 
|  | 4152 | int node; | 
|  | 4153 | int i; | 
|  | 4154 |  | 
|  | 4155 | if (!(cachep->flags & SLAB_STORE_USER)) | 
|  | 4156 | return 0; | 
|  | 4157 | if (!(cachep->flags & SLAB_RED_ZONE)) | 
|  | 4158 | return 0; | 
|  | 4159 |  | 
|  | 4160 | /* OK, we can do it */ | 
|  | 4161 |  | 
|  | 4162 | n[1] = 0; | 
|  | 4163 |  | 
|  | 4164 | for_each_online_node(node) { | 
|  | 4165 | l3 = cachep->nodelists[node]; | 
|  | 4166 | if (!l3) | 
|  | 4167 | continue; | 
|  | 4168 |  | 
|  | 4169 | check_irq_on(); | 
|  | 4170 | spin_lock_irq(&l3->list_lock); | 
|  | 4171 |  | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 4172 | list_for_each_entry(slabp, &l3->slabs_full, list) | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 4173 | handle_slab(n, cachep, slabp); | 
| Christoph Hellwig | 7a7c381 | 2006-06-23 02:03:17 -0700 | [diff] [blame] | 4174 | list_for_each_entry(slabp, &l3->slabs_partial, list) | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 4175 | handle_slab(n, cachep, slabp); | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 4176 | spin_unlock_irq(&l3->list_lock); | 
|  | 4177 | } | 
|  | 4178 | name = cachep->name; | 
|  | 4179 | if (n[0] == n[1]) { | 
|  | 4180 | /* Increase the buffer size */ | 
|  | 4181 | mutex_unlock(&cache_chain_mutex); | 
|  | 4182 | m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); | 
|  | 4183 | if (!m->private) { | 
|  | 4184 | /* Too bad, we are really out */ | 
|  | 4185 | m->private = n; | 
|  | 4186 | mutex_lock(&cache_chain_mutex); | 
|  | 4187 | return -ENOMEM; | 
|  | 4188 | } | 
|  | 4189 | *(unsigned long *)m->private = n[0] * 2; | 
|  | 4190 | kfree(n); | 
|  | 4191 | mutex_lock(&cache_chain_mutex); | 
|  | 4192 | /* Now make sure this entry will be retried */ | 
|  | 4193 | m->count = m->size; | 
|  | 4194 | return 0; | 
|  | 4195 | } | 
|  | 4196 | for (i = 0; i < n[1]; i++) { | 
|  | 4197 | seq_printf(m, "%s: %lu ", name, n[2*i+3]); | 
|  | 4198 | show_symbol(m, n[2*i+2]); | 
|  | 4199 | seq_putc(m, '\n'); | 
|  | 4200 | } | 
| Siddha, Suresh B | d2e7b7d | 2006-09-25 23:31:47 -0700 | [diff] [blame] | 4201 |  | 
| Al Viro | 871751e | 2006-03-25 03:06:39 -0800 | [diff] [blame] | 4202 | return 0; | 
|  | 4203 | } | 
|  | 4204 |  | 
|  | 4205 | struct seq_operations slabstats_op = { | 
|  | 4206 | .start = leaks_start, | 
|  | 4207 | .next = s_next, | 
|  | 4208 | .stop = s_stop, | 
|  | 4209 | .show = leaks_show, | 
|  | 4210 | }; | 
|  | 4211 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4212 | #endif | 
|  | 4213 |  | 
| Manfred Spraul | 00e145b | 2005-09-03 15:55:07 -0700 | [diff] [blame] | 4214 | /** | 
|  | 4215 | * ksize - get the actual amount of memory allocated for a given object | 
|  | 4216 | * @objp: Pointer to the object | 
|  | 4217 | * | 
|  | 4218 | * kmalloc may internally round up allocations and return more memory | 
|  | 4219 | * than requested. ksize() can be used to determine the actual amount of | 
|  | 4220 | * memory allocated. The caller may use this additional memory, even though | 
|  | 4221 | * a smaller amount of memory was initially specified with the kmalloc call. | 
|  | 4222 | * The caller must guarantee that objp points to a valid object previously | 
|  | 4223 | * allocated with either kmalloc() or kmem_cache_alloc(). The object | 
|  | 4224 | * must not be freed during the duration of the call. | 
|  | 4225 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4226 | unsigned int ksize(const void *objp) | 
|  | 4227 | { | 
| Manfred Spraul | 00e145b | 2005-09-03 15:55:07 -0700 | [diff] [blame] | 4228 | if (unlikely(objp == NULL)) | 
|  | 4229 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4230 |  | 
| Pekka Enberg | 6ed5eb2 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 4231 | return obj_size(virt_to_cache(objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4232 | } |
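/*
 * Usage sketch (illustrative, not part of slab.c): because kmalloc()
 * rounds requests up to the nearest general cache size, ksize() may
 * report more usable space than was asked for; a 100-byte request is
 * typically satisfied from the 128-byte cache.
 */
static void example_ksize(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);

	if (!buf)
		return;
	printk(KERN_INFO "asked for 100, got %u usable bytes\n", ksize(buf));
	kfree(buf);
}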