#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

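/*
 * Compute the effective alignment for a cache from the requested alignment,
 * the creation flags (e.g. SLAB_HWCACHE_ALIGN) and the object size; common
 * helper implemented in mm/slab_common.c.
 */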
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

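/*
 * Early-boot helpers from mm/slab_common.c: create_boot_cache() sets up a
 * statically allocated kmem_cache, and create_kmalloc_cache() builds the
 * fixed-size kmalloc caches on top of it.
 */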
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
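/*
 * Cache merging: SLUB may reuse (alias) an existing compatible cache instead
 * of creating a new one; the other allocators never merge, hence the stub.
 */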
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

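/*
 * Cache teardown hooks provided by each allocator: __kmem_cache_shutdown() is
 * called from kmem_cache_destroy(), __kmem_cache_shrink() releases empty
 * slabs, and slab_kmem_cache_release() frees the kmem_cache structure itself.
 */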
int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

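/*
 * Snapshot of per-cache statistics reported through /proc/slabinfo; each
 * allocator fills it in via get_slabinfo(). The limit, batchcount and shared
 * fields are primarily SLAB's per-CPU array-cache tunables.
 */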
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_MEMCG_KMEM
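/*
 * A root cache is one created directly by kmem_cache_create(); per-memcg
 * child caches hang off the root's memcg_params and are not root caches.
 */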
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * Per-memcg caches get a suffix appended to their name, because two caches in
 * the system cannot share the same name. When printing a cache name locally,
 * though, it is clearer to use the base (root cache) name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note that RCU protects only the memcg_caches array, not the per-memcg caches
 * themselves, so the caller must ensure the memcg's cache does not go away.
 * Since a memcg's cache is destroyed only along with its root cache, this is
 * guaranteed if we are about to allocate from the cache or already hold a
 * reference to the root cache by other means. Otherwise, hold either the
 * slab_mutex or the memcg's slab_caches_mutex while calling this function and
 * while accessing the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

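/*
 * Charge/uncharge the pages backing a slab of a per-memcg cache against the
 * owning memory cgroup's kmem counter; both are no-ops for root caches and
 * when kmem accounting is disabled.
 */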
static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_charge_slab(s, gfp, order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	__memcg_uncharge_slab(s, order);
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif

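/*
 * Map an object being freed back to the cache it was actually allocated from
 * (via its page). With kmemcg, the caller may pass the root cache while the
 * object lives in a per-memcg child cache; with SLAB_DEBUG_FREE, a genuine
 * mismatch is reported.
 */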
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to even do the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __n = get_node(__s, __node), __node < nr_node_ids; __node++) \
		 if (__n)

#endif

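/*
 * seq_file iterator callbacks for the slabinfo interfaces, implemented in
 * mm/slab_common.c; iteration over slab_caches is serialized by slab_mutex.
 */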
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */