#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
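
/*
 * Illustrative example (not part of the original header): with a
 * compile-time constant size, the kmalloc() inline defined below picks
 * the general cache directly; a size only known at runtime falls back
 * to the out-of-line __kmalloc():
 *
 *	buf = kmalloc(128, GFP_KERNEL);     <- constant: cache chosen at build time
 *	buf = kmalloc(nbytes, GFP_KERNEL);  <- runtime size: calls __kmalloc()
 */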

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, while obj_offset
	 * below contains the offset to the user object within it.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node, since "nodelists" uses the remainder of
	 * the available pointers.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
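
/*
 * Illustrative sketch (an assumption about mm/slab.c, not guaranteed to
 * match it verbatim): reciprocal_buffer_size holds reciprocal_value(size)
 * so the hot object-to-index conversion in the allocator can replace an
 * integer division with a multiply:
 *
 *	#include <linux/reciprocal_div.h>
 *
 *	static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 *						const struct slab *slab, void *obj)
 *	{
 *		u32 offset = (obj - slab->s_mem);
 *		return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 *	}
 */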

/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
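
/*
 * Illustrative sketch (an assumption, not part of this header): mm/slab.c
 * builds malloc_sizes[] from the same <linux/kmalloc_sizes.h> table that
 * the kmalloc() inline below walks, roughly:
 *
 *	#define CACHE(x) { .cs_size = (x) },
 *	struct cache_sizes malloc_sizes[] = {
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	};
 *	#undef CACHE
 *
 * which keeps the index computed in kmalloc() in step with the cache
 * that backs each size class.
 */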

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
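		/*
		 * For a constant 'size' the #include above expands to an
		 * if/else ladder, one test per general cache size class
		 * (the 32/64/... values here are illustrative, taken from
		 * the usual kmalloc_sizes.h table):
		 *
		 *	if (size <= 32) goto found; else i++;
		 *	if (size <= 64) goto found; else i++;
		 *	...
		 *
		 * so the compiler folds 'i' to the matching malloc_sizes[]
		 * index. Falling through to the return below means the
		 * request is larger than any general cache.
		 */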
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
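
/*
 * Illustrative usage (not part of the original header): a constant-size,
 * node-local allocation resolves its cache at compile time just like
 * kmalloc(); numa_node_id() here picks the calling CPU's node:
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL,
 *				     numa_node_id());
 */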

#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLAB_DEF_H */