#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
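
/*
 * Illustrative sketch (not a definition from this file): allocators compare
 * against these states to decide how much machinery is usable during early
 * boot, e.g.
 *
 *	if (slab_state >= UP)
 *		do_late_setup(s);	(do_late_setup() is hypothetical)
 */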

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;
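
/*
 * Typical usage sketch: any traversal of slab_caches must be done under
 * slab_mutex, e.g.
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		inspect(s);		(inspect() is a hypothetical hook)
 *	mutex_unlock(&slab_mutex);
 */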

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
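
/*
 * Usage sketch: SLAB/SLUB resolve a kmalloc() request size to one of the
 * fixed kmalloc caches roughly as
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 * (an assumption about the callers, not something defined in this header).
 */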

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

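/*
 * Sketch of the intended use (an assumption about the create path, not a
 * definition here): caller-supplied flags can be clamped to the legal set
 * with
 *
 *	flags &= CACHE_CREATE_MASK;
 */
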
int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

#define for_each_memcg_cache_safe(iter, tmp, root) \
	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
				 memcg_params.list)

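/*
 * Usage sketch (illustrative): with slab_mutex held, the per-memcg children
 * of a root cache can be walked as
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root)
 *		do_something(c);	(do_something() is hypothetical)
 */
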
static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
	for ((void)(iter), (void)(tmp), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))

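/*
 * Usage sketch (illustrative; count_objects() is a hypothetical helper):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *	unsigned long total = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		total += count_objects(n);
 */
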
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */