#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
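
/*
 * For example, slab_is_available() in mm/slab_common.c is nothing more
 * than a check against this state, telling early-boot callers whether
 * kmalloc() may be used yet:
 *
 *	int slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */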

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);
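
/*
 * A sketch of the alignment logic, following the implementation in
 * mm/slab_common.c: SLAB_HWCACHE_ALIGN asks for cache-line alignment, but
 * the line size is halved while the object still fits into half of it, so
 * small objects are not over-padded; the result is always at least
 * word-aligned.
 *
 *	unsigned long calculate_alignment(unsigned long flags,
 *			unsigned long align, unsigned long size)
 *	{
 *		if (flags & SLAB_HWCACHE_ALIGN) {
 *			unsigned long ralign = cache_line_size();
 *
 *			while (size <= ralign / 2)
 *				ralign /= 2;
 *			align = max(align, ralign);
 *		}
 *
 *		if (align < ARCH_SLAB_MINALIGN)
 *			align = ARCH_SLAB_MINALIGN;
 *
 *		return ALIGN(align, sizeof(void *));
 *	}
 */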

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);
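
/*
 * A simplified sketch of how these are used at boot (modelled on SLUB's
 * kmem_cache_init(); the static boot structures are illustrative): the
 * first caches must be carved out of statically allocated memory, since
 * no cache exists yet to allocate a struct kmem_cache from.
 *
 *	void __init kmem_cache_init(void)
 *	{
 *		static __initdata struct kmem_cache boot_kmem_cache,
 *			boot_kmem_cache_node;
 *
 *		kmem_cache_node = &boot_kmem_cache_node;
 *		kmem_cache = &boot_kmem_cache;
 *
 *		create_boot_cache(kmem_cache_node, "kmem_cache_node",
 *			sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 *		slab_state = PARTIAL;
 *
 *		create_boot_cache(kmem_cache, "kmem_cache",
 *			sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 *		...
 *	}
 */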

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
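
/*
 * Under SLUB, __kmem_cache_alias() lets kmem_cache_create() merge a new
 * cache into an existing one with a compatible layout instead of creating
 * a second cache. An illustrative consequence (not exact kernel code):
 *
 *	struct kmem_cache *a = kmem_cache_create("a", 64, 0, 0, NULL);
 *	struct kmem_cache *b = kmem_cache_create("b", 64, 0, 0, NULL);
 *
 * may leave a == b, with both names referring to one merged cache.
 */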


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
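
/*
 * CACHE_CREATE_MASK is meant for the common cache-creation path to reject
 * flag bits that have no meaning in the current configuration. A hedged
 * sketch of such a check (the real error handling may differ):
 *
 *	if (flags & ~CACHE_CREATE_MASK)
 *		return NULL;
 */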

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
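
/*
 * slabinfo_write() is the backend for tuning a cache through /proc/slabinfo
 * (effective with CONFIG_SLAB). The expected input is the cache name
 * followed by limit, batchcount and shared, e.g. from a shell:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */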

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
		(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * Memcg caches are created with a suffix appended to their name, since
 * two caches in the system cannot share a name. When printing them
 * locally, though, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note that only the memcg_caches array is protected by RCU, not the
 * per-memcg caches themselves. The caller must therefore ensure that the
 * memcg's cache does not go away. Since a memcg cache, once created, is
 * destroyed only along with its root cache, this holds whenever we are
 * going to allocate from the cache or hold a reference to the root cache
 * by other means. Otherwise, hold either the slab_mutex or the memcg's
 * slab_caches_mutex while calling this function and accessing the
 * returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}
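
/*
 * A hypothetical caller, sketched only to show the locking rules above:
 * look up the per-memcg cache, then allocate from it, which pins the
 * cache per the comment above (memcg_cache_id() is assumed to map a
 * memcg to its array index):
 *
 *	struct kmem_cache *c;
 *
 *	c = cache_from_memcg_idx(root_cache, memcg_cache_id(memcg));
 *	if (c)
 *		obj = kmem_cache_alloc(c, gfp);
 */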

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If kmemcg is not compiled in, the compiler should be smart
	 * enough to not do even the assignment, in which case
	 * slab_equal_or_root will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
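
/*
 * Both allocators run kmem_cache_free() through this check; abridged from
 * SLUB's version:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		if (!s)
 *			return;
 *		slab_free(s, virt_to_head_page(x), x, _RET_IP_);
 *	}
 */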

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};
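
/*
 * The per-node structures hang off the allocator-specific part of
 * struct kmem_cache; SLUB's accessor, for reference (get_node() in
 * mm/slub.c):
 *
 *	static inline struct kmem_cache_node *get_node(struct kmem_cache *s,
 *						       int node)
 *	{
 *		return s->node[node];
 *	}
 */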

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */