/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		kmem_cache_free(s, p[i]);
}

bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return false;
		}
	}
	return true;
}
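
/*
 * Usage sketch: these two functions are the generic fallbacks behind the
 * public bulk interface, kmem_cache_alloc_bulk()/kmem_cache_free_bulk().
 * A caller needing a batch of objects might use that interface roughly as
 * below ("my_cache" is a placeholder cache created elsewhere; the exact
 * return convention of the public wrapper depends on the allocator and
 * kernel version, so treat this as a sketch only):
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	... use the objects ...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 *
 * Note how the fallback above already frees a partially allocated batch on
 * failure, so callers never have to clean up a half-filled array.
 */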

#ifdef CONFIG_MEMCG_KMEM
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
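
/*
 * Worked example (assuming no debugging options are enabled): with
 * slab_nomerge unset, a request for a 60-byte cache with no constructor is
 * first rounded up, here to 64 bytes, and can then alias an existing
 * compatible cache of the same rounded size, e.g. kmalloc-64, because the
 * size difference stays below sizeof(void *) and the SLAB_MERGE_SAME bits
 * match.  Booting with slab_nomerge, or creating the cache with any of the
 * SLAB_NEVER_MERGE flags (red zoning, poisoning, ...), defeats this
 * aliasing.
 */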

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
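
/*
 * Worked example (assuming 64-byte cache lines): a SLAB_HWCACHE_ALIGN cache
 * of 20-byte objects starts from ralign = 64, halves it while the object
 * still fits into half a line (64 -> 32), then stops because 20 > 16, so
 * the objects end up 32-byte aligned; a 100-byte object would keep the
 * full 64-byte alignment.  Without SLAB_HWCACHE_ALIGN the result is simply
 * the requested alignment, raised to ARCH_SLAB_MINALIGN and rounded up to
 * a multiple of sizeof(void *).
 */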

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		s = NULL;	/* suppress uninit var warning */
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
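
/*
 * Usage sketch ("struct foo" and "foo_cachep" are placeholder names): a
 * typical caller pairs kmem_cache_create() with kmem_cache_alloc() and
 * kmem_cache_free(), and finally kmem_cache_destroy():
 *
 *	static struct kmem_cache *foo_cachep;
 *	struct foo *obj;
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 *
 * Because compatible caches may be merged (see find_mergeable() above),
 * the name shown in /proc/slabinfo is not guaranteed to be unique to this
 * caller.
 */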
Christoph Lameter97d06602012-07-06 15:25:11 -0500450
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800451static int shutdown_cache(struct kmem_cache *s,
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800452 struct list_head *release, bool *need_rcu_barrier)
453{
454 if (__kmem_cache_shutdown(s) != 0) {
455 printk(KERN_ERR "kmem_cache_destroy %s: "
456 "Slab cache still has objects\n", s->name);
457 dump_stack();
458 return -EBUSY;
459 }
460
461 if (s->flags & SLAB_DESTROY_BY_RCU)
462 *need_rcu_barrier = true;
463
464#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800465 if (!is_root_cache(s))
Vladimir Davydov426589f2015-02-12 14:59:23 -0800466 list_del(&s->memcg_params.list);
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800467#endif
468 list_move(&s->list, release);
469 return 0;
470}
471
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800472static void release_caches(struct list_head *release, bool need_rcu_barrier)
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800473{
474 struct kmem_cache *s, *s2;
475
476 if (need_rcu_barrier)
477 rcu_barrier();
478
479 list_for_each_entry_safe(s, s2, release, list) {
480#ifdef SLAB_SUPPORTS_SYSFS
481 sysfs_slab_remove(s);
482#else
483 slab_kmem_cache_release(s);
484#endif
485 }
486}
487
Vladimir Davydov794b1242014-04-07 15:39:26 -0700488#ifdef CONFIG_MEMCG_KMEM
489/*
Vladimir Davydov776ed0f2014-06-04 16:10:02 -0700490 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
Vladimir Davydov794b1242014-04-07 15:39:26 -0700491 * @memcg: The memory cgroup the new cache is for.
492 * @root_cache: The parent of the new cache.
493 *
494 * This function attempts to create a kmem cache that will serve allocation
495 * requests going from @memcg to @root_cache. The new cache inherits properties
496 * from its parent.
497 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800498void memcg_create_kmem_cache(struct mem_cgroup *memcg,
499 struct kmem_cache *root_cache)
Vladimir Davydov794b1242014-04-07 15:39:26 -0700500{
Vladimir Davydov3e0350a2015-02-10 14:11:44 -0800501 static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
Michal Hocko33398cf2015-09-08 15:01:02 -0700502 struct cgroup_subsys_state *css = &memcg->css;
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800503 struct memcg_cache_array *arr;
Vladimir Davydovbd673142014-06-04 16:07:40 -0700504 struct kmem_cache *s = NULL;
Vladimir Davydov794b1242014-04-07 15:39:26 -0700505 char *cache_name;
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800506 int idx;
Vladimir Davydov794b1242014-04-07 15:39:26 -0700507
508 get_online_cpus();
Vladimir Davydov03afc0e2014-06-04 16:07:20 -0700509 get_online_mems();
510
Vladimir Davydov794b1242014-04-07 15:39:26 -0700511 mutex_lock(&slab_mutex);
512
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800513 /*
514 * The memory cgroup could have been deactivated while the cache
515 * creation work was pending.
516 */
517 if (!memcg_kmem_is_active(memcg))
518 goto out_unlock;
519
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800520 idx = memcg_cache_id(memcg);
521 arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
522 lockdep_is_held(&slab_mutex));
523
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800524 /*
525 * Since per-memcg caches are created asynchronously on first
526 * allocation (see memcg_kmem_get_cache()), several threads can try to
527 * create the same cache, but only one of them may succeed.
528 */
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800529 if (arr->entries[idx])
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800530 goto out_unlock;
531
Vladimir Davydovf1008362015-02-12 14:59:29 -0800532 cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
Vladimir Davydov073ee1c2014-06-04 16:08:23 -0700533 cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
Vladimir Davydovf1008362015-02-12 14:59:29 -0800534 css->id, memcg_name_buf);
Vladimir Davydov794b1242014-04-07 15:39:26 -0700535 if (!cache_name)
536 goto out_unlock;
537
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800538 s = create_cache(cache_name, root_cache->object_size,
539 root_cache->size, root_cache->align,
540 root_cache->flags, root_cache->ctor,
541 memcg, root_cache);
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800542 /*
543 * If we could not create a memcg cache, do not complain, because
544 * that's not critical at all as we can always proceed with the root
545 * cache.
546 */
Vladimir Davydovbd673142014-06-04 16:07:40 -0700547 if (IS_ERR(s)) {
Vladimir Davydov794b1242014-04-07 15:39:26 -0700548 kfree(cache_name);
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800549 goto out_unlock;
Vladimir Davydovbd673142014-06-04 16:07:40 -0700550 }
Vladimir Davydov794b1242014-04-07 15:39:26 -0700551
Vladimir Davydov426589f2015-02-12 14:59:23 -0800552 list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
553
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800554 /*
555 * Since readers won't lock (see cache_from_memcg_idx()), we need a
556 * barrier here to ensure nobody will see the kmem_cache partially
557 * initialized.
558 */
559 smp_wmb();
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800560 arr->entries[idx] = s;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800561
Vladimir Davydov794b1242014-04-07 15:39:26 -0700562out_unlock:
563 mutex_unlock(&slab_mutex);
Vladimir Davydov03afc0e2014-06-04 16:07:20 -0700564
565 put_online_mems();
Vladimir Davydov794b1242014-04-07 15:39:26 -0700566 put_online_cpus();
567}
Vladimir Davydovb8529902014-04-07 15:39:28 -0700568
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800569void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
570{
571 int idx;
572 struct memcg_cache_array *arr;
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -0800573 struct kmem_cache *s, *c;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800574
575 idx = memcg_cache_id(memcg);
576
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -0800577 get_online_cpus();
578 get_online_mems();
579
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800580 mutex_lock(&slab_mutex);
581 list_for_each_entry(s, &slab_caches, list) {
582 if (!is_root_cache(s))
583 continue;
584
585 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
586 lockdep_is_held(&slab_mutex));
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -0800587 c = arr->entries[idx];
588 if (!c)
589 continue;
590
591 __kmem_cache_shrink(c, true);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800592 arr->entries[idx] = NULL;
593 }
594 mutex_unlock(&slab_mutex);
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -0800595
596 put_online_mems();
597 put_online_cpus();
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -0800598}
599
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800600void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
Vladimir Davydovb8529902014-04-07 15:39:28 -0700601{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800602 LIST_HEAD(release);
603 bool need_rcu_barrier = false;
604 struct kmem_cache *s, *s2;
Vladimir Davydovb8529902014-04-07 15:39:28 -0700605
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800606 get_online_cpus();
607 get_online_mems();
Vladimir Davydovb8529902014-04-07 15:39:28 -0700608
Vladimir Davydovb8529902014-04-07 15:39:28 -0700609 mutex_lock(&slab_mutex);
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800610 list_for_each_entry_safe(s, s2, &slab_caches, list) {
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800611 if (is_root_cache(s) || s->memcg_params.memcg != memcg)
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800612 continue;
613 /*
614 * The cgroup is about to be freed and therefore has no charges
615 * left. Hence, all its caches must be empty by now.
616 */
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800617 BUG_ON(shutdown_cache(s, &release, &need_rcu_barrier));
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800618 }
619 mutex_unlock(&slab_mutex);
Vladimir Davydovb8529902014-04-07 15:39:28 -0700620
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800621 put_online_mems();
622 put_online_cpus();
623
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800624 release_caches(&release, need_rcu_barrier);
Vladimir Davydovb8529902014-04-07 15:39:28 -0700625}
Vladimir Davydov794b1242014-04-07 15:39:26 -0700626#endif /* CONFIG_MEMCG_KMEM */
627
Christoph Lameter41a21282014-05-06 12:50:08 -0700628void slab_kmem_cache_release(struct kmem_cache *s)
629{
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800630 destroy_memcg_params(s);
Andrzej Hajda3dec16e2015-02-13 14:36:38 -0800631 kfree_const(s->name);
Christoph Lameter41a21282014-05-06 12:50:08 -0700632 kmem_cache_free(kmem_cache, s);
633}
634
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000635void kmem_cache_destroy(struct kmem_cache *s)
636{
Vladimir Davydov426589f2015-02-12 14:59:23 -0800637 struct kmem_cache *c, *c2;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800638 LIST_HEAD(release);
639 bool need_rcu_barrier = false;
640 bool busy = false;
641
Sergey Senozhatsky3942d292015-09-08 15:00:50 -0700642 if (unlikely(!s))
643 return;
644
Vladimir Davydov426589f2015-02-12 14:59:23 -0800645 BUG_ON(!is_root_cache(s));
646
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000647 get_online_cpus();
Vladimir Davydov03afc0e2014-06-04 16:07:20 -0700648 get_online_mems();
649
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000650 mutex_lock(&slab_mutex);
Vladimir Davydovb8529902014-04-07 15:39:28 -0700651
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000652 s->refcount--;
Vladimir Davydovb8529902014-04-07 15:39:28 -0700653 if (s->refcount)
654 goto out_unlock;
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000655
Vladimir Davydov426589f2015-02-12 14:59:23 -0800656 for_each_memcg_cache_safe(c, c2, s) {
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800657 if (shutdown_cache(c, &release, &need_rcu_barrier))
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800658 busy = true;
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000659 }
Vladimir Davydovb8529902014-04-07 15:39:28 -0700660
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800661 if (!busy)
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800662 shutdown_cache(s, &release, &need_rcu_barrier);
Vladimir Davydovb8529902014-04-07 15:39:28 -0700663
664out_unlock:
665 mutex_unlock(&slab_mutex);
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800666
Vladimir Davydov03afc0e2014-06-04 16:07:20 -0700667 put_online_mems();
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000668 put_online_cpus();
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -0800669
Vladimir Davydovc9a77a72015-11-05 18:45:08 -0800670 release_caches(&release, need_rcu_barrier);
Christoph Lameter945cf2b2012-09-04 23:18:33 +0000671}
672EXPORT_SYMBOL(kmem_cache_destroy);
673
Vladimir Davydov03afc0e2014-06-04 16:07:20 -0700674/**
675 * kmem_cache_shrink - Shrink a cache.
676 * @cachep: The cache to shrink.
677 *
678 * Releases as many slabs as possible for a cache.
679 * To help debugging, a zero exit status indicates all slabs were released.
680 */
681int kmem_cache_shrink(struct kmem_cache *cachep)
682{
683 int ret;
684
685 get_online_cpus();
686 get_online_mems();
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -0800687 ret = __kmem_cache_shrink(cachep, false);
Vladimir Davydov03afc0e2014-06-04 16:07:20 -0700688 put_online_mems();
689 put_online_cpus();
690 return ret;
691}
692EXPORT_SYMBOL(kmem_cache_shrink);
693
Denis Kirjanovfda90122015-11-05 18:44:59 -0800694bool slab_is_available(void)
Christoph Lameter97d06602012-07-06 15:25:11 -0500695{
696 return slab_state >= UP;
697}
Glauber Costab7454ad2012-10-19 18:20:25 +0400698
Christoph Lameter45530c42012-11-28 16:23:07 +0000699#ifndef CONFIG_SLOB
700/* Create a cache during boot when no slab services are available yet */
701void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
702 unsigned long flags)
703{
704 int err;
705
706 s->name = name;
707 s->size = s->object_size = size;
Christoph Lameter45906852012-11-28 16:23:16 +0000708 s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800709
710 slab_init_memcg_params(s);
711
Christoph Lameter45530c42012-11-28 16:23:07 +0000712 err = __kmem_cache_create(s, flags);
713
714 if (err)
Christoph Lameter31ba7342013-01-10 19:00:53 +0000715 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
Christoph Lameter45530c42012-11-28 16:23:07 +0000716 name, size, err);
717
718 s->refcount = -1; /* Exempt from merging for now */
719}
720
721struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
722 unsigned long flags)
723{
724 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
725
726 if (!s)
727 panic("Out of memory when creating slab %s\n", name);
728
729 create_boot_cache(s, name, size, flags);
730 list_add(&s->list, &slab_caches);
731 s->refcount = 1;
732 return s;
733}
734
Christoph Lameter9425c582013-01-10 19:12:17 +0000735struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
736EXPORT_SYMBOL(kmalloc_caches);
737
738#ifdef CONFIG_ZONE_DMA
739struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
740EXPORT_SYMBOL(kmalloc_dma_caches);
741#endif
742
/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
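
/*
 * Worked example (assuming KMALLOC_MIN_SIZE == 8): a 56-byte request maps
 * through size_index_elem(56) == 6 and size_index[6] == 6, i.e. the
 * kmalloc-64 cache, while a 300-byte request takes the fls() path
 * (fls(299) == 9) and is served from kmalloc-512.  With CONFIG_ZONE_DMA,
 * GFP_DMA requests are redirected to the parallel dma-kmalloc array
 * instead.
 */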

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL, 0},			{"kmalloc-96", 96},
	{"kmalloc-192", 192},		{"kmalloc-8", 8},
	{"kmalloc-16", 16},		{"kmalloc-32", 32},
	{"kmalloc-64", 64},		{"kmalloc-128", 128},
	{"kmalloc-256", 256},		{"kmalloc-512", 512},
	{"kmalloc-1024", 1024},		{"kmalloc-2048", 2048},
	{"kmalloc-4096", 4096},		{"kmalloc-8192", 8192},
	{"kmalloc-16384", 16384},	{"kmalloc-32768", 32768},
	{"kmalloc-65536", 65536},	{"kmalloc-131072", 131072},
	{"kmalloc-262144", 262144},	{"kmalloc-524288", 524288},
	{"kmalloc-1048576", 1048576},	{"kmalloc-2097152", 2097152},
	{"kmalloc-4194304", 4194304},	{"kmalloc-8388608", 8388608},
	{"kmalloc-16777216", 16777216},	{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of a power-of-two size.
		 * These have to be created immediately after the
		 * earlier power-of-two caches.
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
					    "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
					size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
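
/*
 * Note: callers normally do not call kmalloc_order() directly; kmalloc()
 * falls back to it once a request exceeds the largest kmalloc cache of the
 * selected allocator.  For example, under SLUB with 4K pages a
 * kmalloc(16 * 1024, GFP_KERNEL) bypasses the kmalloc caches and is handed
 * to the page allocator as an order-2 compound page, and kfree() later
 * recovers that order from the compound page itself.
 */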

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		"<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		"<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
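
/*
 * Usage sketch: growing a buffer with krealloc() follows the same pattern
 * as userspace realloc(); on failure the original buffer is left untouched,
 * and on success the old pointer must not be used again:
 *
 *	new_buf = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;
 *	buf = new_buf;
 *
 * On the error path above, buf is still valid and still owned by the
 * caller.  Because __do_krealloc() compares against ksize(), a request that
 * still fits into the originally allocated object returns the same pointer
 * without copying anything.
 */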

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before being freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
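
/*
 * Example: kzfree() is intended for buffers that held sensitive data
 * (keys, passphrases) whose contents should not linger in freed slab
 * memory:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 *
 * The memset() above covers the whole ksize(key) region, not just key_len,
 * so prefer plain kfree() on hot paths where clearing is not needed.
 */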

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);