#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, in which case they are BYTES_PER_WORD
 * aligned. Some archs want to perform DMA into kmalloc caches and need a
 * guaranteed alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible, do not define this for CONFIG_DEBUG_SLAB builds, as it
 * disables some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
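
/*
 * Illustrative example (not part of this header): an architecture whose
 * DMA engines require cacheline-aligned buffers would typically publish
 * that requirement from its asm/cache.h, e.g.:
 *
 *	#define L1_CACHE_SHIFT		5
 *	#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 *	#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 *
 * With ARCH_DMA_MINALIGN defined, the #ifdef above widens
 * ARCH_KMALLOC_MINALIGN so that every kmalloc() buffer is safe to use
 * as a DMA target. The shift value shown is hypothetical.
 */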

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields, while the
	 * following two variables contain the offset to the user object
	 * and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to
	 * size this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init()).
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
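
/*
 * Illustrative usage (not part of this header): a subsystem typically
 * creates a dedicated kmem_cache once at init time and then allocates
 * objects from it. The names below (struct foo, foo_cachep) are
 * hypothetical; kmem_cache_create() and kmem_cache_free() are declared
 * in <linux/slab.h>.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */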

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

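/*
 * malloc_sizes[] is generated from <linux/kmalloc_sizes.h>, which expands
 * a caller-supplied CACHE(x) macro once per supported general cache size.
 * Abridged sketch of how mm/slab.c builds the table (the real size list
 * is conditional on PAGE_SIZE and L1_CACHE_BYTES; the sizes shown are
 * only an illustration):
 *
 *	struct cache_sizes malloc_sizes[] = {
 *	#define CACHE(x) { .cs_size = (x) },
 *	#include <linux/kmalloc_sizes.h>	// CACHE(32) CACHE(64) ...
 *		CACHE(ULONG_MAX)
 *	#undef CACHE
 *	};
 *
 * The same header is included again in kmalloc() below with a different
 * CACHE() definition, turning a constant-size request into a table index.
 */
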
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/* Find the first general cache large enough for size. */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
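
/*
 * For a constant size, the CACHE(x) chain above unrolls at compile time
 * into a cascade that the optimizer collapses to a single table index.
 * Roughly (illustrative expansion; the actual sizes come from
 * <linux/kmalloc_sizes.h>):
 *
 *	if (size <= 32) goto found; else i++;
 *	if (size <= 64) goto found; else i++;
 *	if (size <= 128) goto found; else i++;
 *	...
 *
 * Because size is a compile-time constant, every branch folds away and
 * i becomes the index of the first cache that fits, so for example
 * kmalloc(24, GFP_KERNEL) compiles down to a direct kmem_cache_alloc()
 * from that general cache with no runtime search.
 */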

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/* Find the first general cache large enough for size. */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
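
/*
 * Illustrative usage (not part of this header): NUMA-aware code can place
 * a buffer on the node that will touch it most. The variable names are
 * hypothetical:
 *
 *	int node = cpu_to_node(cpu);
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);
 *
 * Passing -1 (NUMA_NO_NODE) as the node means "no preference", giving
 * plain kmalloc() behaviour.
 */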

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */