#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
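
/*
 * Illustrative note (not part of the original header): with a constant
 * size, a call such as kmalloc(100, GFP_KERNEL) is resolved at compile
 * time to an allocation from the 128-byte general cache, using the
 * kmalloc_index() size-to-cache mapping in the kmalloc() inline below.
 * A size only known at runtime instead falls back to __kmalloc().
 */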

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields; the following
	 * variable contains the offset to the user object.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
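
/*
 * Illustrative note (not part of the original header): reciprocal_buffer_size
 * caches reciprocal_value(cache->size) so that the hot object-to-index
 * conversion can avoid an integer division, roughly as mm/slab.c does:
 *
 *	static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 *						const struct slab *slab, void *obj)
 *	{
 *		u32 offset = (obj - slab->s_mem);
 *		return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 *	}
 *
 * Treat this as a sketch of the technique, not the authoritative helper.
 */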

extern struct kmem_cache *kmalloc_caches[PAGE_SHIFT + MAX_ORDER];
extern struct kmem_cache *kmalloc_dma_caches[PAGE_SHIFT + MAX_ORDER];

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
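
/*
 * Usage sketch (illustrative, not part of the original header): with a
 * compile-time-constant size the inline above resolves the cache lookup at
 * compile time, while a runtime-variable size takes the __kmalloc() path:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);   <- constant size
 *	void *buf = kmalloc(len, GFP_KERNEL);              <- variable size
 *
 * "struct foo", "f", "buf" and "len" are hypothetical names used only for
 * illustration.
 */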

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
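
/*
 * Usage sketch (illustrative, not part of the original header): kmalloc_node()
 * places the allocation on a specific NUMA node, e.g. allocating per-node
 * data from a hypothetical init path:
 *
 *	for_each_online_node(nid) {
 *		data[nid] = kmalloc_node(sizeof(*data[nid]), GFP_KERNEL, nid);
 *		if (!data[nid])
 *			goto fail;
 *	}
 *
 * "data", "nid" and the "fail" label are hypothetical.
 */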

#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLAB_DEF_H */