/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
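
/*
 * Illustrative note (not from this header): callers typically gate slab use
 * on this state.  For instance, slab_is_available() in mm/slab_common.c is
 * essentially
 *
 *	return slab_state >= UP;
 *
 * i.e. kmalloc()/kmem_cache_alloc() may be used once UP is reached.
 */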

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
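
/*
 * Illustrative sketch (not part of this header): the canonical way to walk
 * the cache list is under slab_mutex, e.g.
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		pr_info("cache %s, object size %u\n", s->name, s->object_size);
 *	mutex_unlock(&slab_mutex);
 */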

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
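
/*
 * Illustrative note (not from this header): kmalloc_slab() maps a request
 * size to the pre-built kmalloc cache that will back it, e.g.
 *
 *	struct kmem_cache *s = kmalloc_slab(100, GFP_KERNEL);
 *
 * would, assuming a typical configuration, return the "kmalloc-128" cache,
 * since sizes are rounded up to the next entry in kmalloc_info[].
 */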


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);
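
/*
 * Illustrative note (not from this header): create_boot_cache() is the
 * bootstrap path used before kmalloc works.  SLUB's kmem_cache_init(), for
 * instance, does something along the lines of
 *
 *	create_boot_cache(kmem_cache, "kmem_cache",
 *			  offsetof(struct kmem_cache, node) +
 *				nr_node_ids * sizeof(struct kmem_cache_node *),
 *			  SLAB_HWCACHE_ALIGN, 0, 0);
 *
 * i.e. the cache of caches is set up from a statically allocated
 * struct kmem_cache before dynamic allocation is possible.
 */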

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
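
/*
 * Illustrative note (not from this header): these hooks implement slab
 * merging.  kmem_cache_create() first asks the allocator whether an
 * existing, compatible cache can be reused, roughly:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;
 *
 * so a compatible existing cache is aliased instead of creating a new one.
 * __kmem_cache_alias() in turn relies on find_mergeable(), which skips
 * candidates for which slab_unmergeable() is true (e.g. caches with
 * constructors or incompatible debug flags).
 */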


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
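
/*
 * Illustrative note (not from this header): kmem_cache_create() uses these
 * masks roughly as follows: requests carrying flags outside
 * SLAB_FLAGS_PERMITTED are rejected, and whatever remains is clamped to
 * what the compiled-in allocator understands:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 *	flags &= CACHE_CREATE_MASK;
 */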

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
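
/*
 * Illustrative sketch (not from this header): when an allocator has no
 * specialized bulk fast path, the generic versions in mm/slab_common.c
 * simply loop over the array, approximately:
 *
 *	int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 *				    size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++) {
 *			void *x = kmem_cache_alloc(s, flags);
 *
 *			if (!x) {
 *				__kmem_cache_free_bulk(s, i, p);
 *				return 0;
 *			}
 *			p[i] = x;
 *		}
 *		return i;
 *	}
 */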

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
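
/*
 * Illustrative sketch (not from this header): a typical walk of a root
 * cache's per-memcg children, done with slab_mutex held:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("child cache %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 *
 * Here "root_cache" stands for any root (non-memcg) kmem_cache.
 */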

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
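
/*
 * Illustrative note (not from this header): the free paths use
 * cache_from_obj() to make sure an object is returned to the cache it
 * actually came from (for example its memcg child cache), along the lines
 * of:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		... free x back into s ...
 *	}
 */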

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
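
/*
 * Illustrative note (not from this header), using hypothetical numbers:
 * slab_ksize() reports how much of an allocated object the caller may use.
 * For a SLUB cache with object_size == 100, inuse == 104 and size == 128,
 * it would return 128 normally, 104 when SLAB_TYPESAFE_BY_RCU or
 * SLAB_STORE_USER reserve the area past 'inuse', and only 100 when
 * redzoning, poisoning or KASAN need the padding for themselves.
 */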

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
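
/*
 * Illustrative sketch (not from this header): an allocator's allocation
 * entry point is expected to bracket the real work with these hooks,
 * roughly:
 *
 *	void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags)
 *	{
 *		void *object;
 *
 *		s = slab_pre_alloc_hook(s, flags);
 *		if (!s)
 *			return NULL;
 *		object = ... allocate from s ...;
 *		slab_post_alloc_hook(s, flags, 1, &object);
 *		return object;
 *	}
 *
 * so fault injection, memcg accounting, kmemleak and KASAN all see every
 * allocation regardless of which allocator is compiled in.
 */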

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
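
/*
 * Illustrative sketch (not from this header): summing per-node state, e.g.
 * counting partial slabs of a SLUB cache across all nodes:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */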

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
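
/*
 * Illustrative note (not from this header): with CONFIG_SLAB_FREELIST_RANDOM
 * an allocator seeds a per-cache random sequence when the cache is set up.
 * SLUB, for example, does something roughly like
 *
 *	unsigned int count = oo_objects(s->oo);
 *
 *	cache_random_seq_create(s, count, GFP_KERNEL);
 *
 * frees the sequence again with cache_random_seq_destroy() at teardown, and
 * uses it to shuffle the order of each slab's freelist.
 */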

#endif /* MM_SLAB_H */