#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
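
/*
 * Illustrative example (editor's note, not part of the original header):
 * with a compile-time-constant size, the kmalloc() inline defined below
 * resolves the general cache at build time, so the call reduces to a
 * direct allocation from that cache:
 *
 *	struct foo *p = kmalloc(sizeof(struct foo), GFP_KERNEL);
 *
 * A non-constant size instead falls through to __kmalloc(), which does
 * the size-to-cache lookup at runtime.
 */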

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[].
	 */
};
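
/*
 * A minimal sketch (editor's assumption, not code from this header) of
 * how the array[] tail can be exploited: caches set up at runtime need
 * only nr_cpu_ids pointer slots, so the allocation is trimmed down from
 * the static [NR_CPUS] worst case:
 *
 *	size_t sz = offsetof(struct kmem_cache, array) +
 *		    nr_cpu_ids * sizeof(struct array_cache *);
 *	struct kmem_cache *cachep = kzalloc(sz, GFP_KERNEL);
 */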

/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
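
/*
 * For reference (defined in mm/slab.c, not here): malloc_sizes[] is
 * generated by expanding the CACHE() macro over <linux/kmalloc_sizes.h>,
 * roughly as follows, with a ULONG_MAX sentinel terminating the table:
 *
 *	#define CACHE(x) { .cs_size = (x) },
 *	struct cache_sizes malloc_sizes[] = {
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	};
 *	#undef CACHE
 */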

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
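
/*
 * Worked example (illustrative, not part of the original header):
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *
 * With a constant size of 100, the CACHE() chain above unrolls at
 * compile time; the dead comparisons are optimized away and i indexes
 * the first general cache that fits (the 128-byte cache with the usual
 * kmalloc_sizes.h table), so the whole call collapses to a single
 * kmem_cache_alloc_trace() on that cache.
 */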

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
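
/*
 * Illustrative usage (editor's note, not from the original header):
 * allocate from a specific NUMA node, e.g. the local one:
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, numa_node_id());
 *
 * The constant-size fast path mirrors kmalloc() above; only the final
 * call is routed to the node-aware allocator.
 */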

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */