/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_NOTRACK | SLAB_ACCOUNT)
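
/*
 * Any SLAB_NEVER_MERGE flag makes a cache unmergeable outright, while two
 * caches can only be merged when they agree on every SLAB_MERGE_SAME flag
 * (see find_mergeable() below).
 */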

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
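
/*
 * The parameter is passed on the kernel command line, e.g. "slab_nomerge",
 * or, when SLUB is the configured allocator, the "slub_nomerge" spelling.
 */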

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
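
/*
 * Example (sketch): callers normally use the kmem_cache_alloc_bulk() and
 * kmem_cache_free_bulk() wrappers, which can fall back to the generic
 * versions above.  A return value of 0 means no objects were allocated:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
 *
 * "cache" here is a placeholder for a kmem_cache created by the caller.
 */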
139
Johannes Weiner127424c2016-01-20 15:02:32 -0800140#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
Tejun Heo510ded32017-02-22 15:41:24 -0800141
142LIST_HEAD(slab_root_caches);
143
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800144void slab_init_memcg_params(struct kmem_cache *s)
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700145{
Tejun Heo9eeadc82017-02-22 15:41:17 -0800146 s->memcg_params.root_cache = NULL;
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800147 RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
Tejun Heo9eeadc82017-02-22 15:41:17 -0800148 INIT_LIST_HEAD(&s->memcg_params.children);
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800149}
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700150
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800151static int init_memcg_params(struct kmem_cache *s,
152 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
153{
154 struct memcg_cache_array *arr;
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700155
Tejun Heo9eeadc82017-02-22 15:41:17 -0800156 if (root_cache) {
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800157 s->memcg_params.root_cache = root_cache;
Tejun Heo9eeadc82017-02-22 15:41:17 -0800158 s->memcg_params.memcg = memcg;
159 INIT_LIST_HEAD(&s->memcg_params.children_node);
Tejun Heobc2791f2017-02-22 15:41:21 -0800160 INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800161 return 0;
162 }
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700163
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800164 slab_init_memcg_params(s);
165
166 if (!memcg_nr_cache_ids)
167 return 0;
168
169 arr = kzalloc(sizeof(struct memcg_cache_array) +
170 memcg_nr_cache_ids * sizeof(void *),
171 GFP_KERNEL);
172 if (!arr)
173 return -ENOMEM;
174
175 RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700176 return 0;
177}
178
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800179static void destroy_memcg_params(struct kmem_cache *s)
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700180{
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800181 if (is_root_cache(s))
182 kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700183}
184
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800185static int update_memcg_params(struct kmem_cache *s, int new_array_size)
Vladimir Davydov6f817f42014-10-09 15:28:47 -0700186{
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800187 struct memcg_cache_array *old, *new;
Vladimir Davydov6f817f42014-10-09 15:28:47 -0700188
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800189 new = kzalloc(sizeof(struct memcg_cache_array) +
190 new_array_size * sizeof(void *), GFP_KERNEL);
191 if (!new)
Vladimir Davydov6f817f42014-10-09 15:28:47 -0700192 return -ENOMEM;
193
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800194 old = rcu_dereference_protected(s->memcg_params.memcg_caches,
195 lockdep_is_held(&slab_mutex));
196 if (old)
197 memcpy(new->entries, old->entries,
198 memcg_nr_cache_ids * sizeof(void *));
Vladimir Davydov6f817f42014-10-09 15:28:47 -0700199
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800200 rcu_assign_pointer(s->memcg_params.memcg_caches, new);
201 if (old)
202 kfree_rcu(old, rcu);
Vladimir Davydov6f817f42014-10-09 15:28:47 -0700203 return 0;
204}
205
Glauber Costa55007d82012-12-18 14:22:38 -0800206int memcg_update_all_caches(int num_memcgs)
207{
208 struct kmem_cache *s;
209 int ret = 0;
Glauber Costa55007d82012-12-18 14:22:38 -0800210
Vladimir Davydov05257a12015-02-12 14:59:01 -0800211 mutex_lock(&slab_mutex);
Tejun Heo510ded32017-02-22 15:41:24 -0800212 list_for_each_entry(s, &slab_root_caches, root_caches_node) {
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800213 ret = update_memcg_params(s, num_memcgs);
Glauber Costa55007d82012-12-18 14:22:38 -0800214 /*
Glauber Costa55007d82012-12-18 14:22:38 -0800215 * Instead of freeing the memory, we'll just leave the caches
216 * up to this point in an updated state.
217 */
218 if (ret)
Vladimir Davydov05257a12015-02-12 14:59:01 -0800219 break;
Glauber Costa55007d82012-12-18 14:22:38 -0800220 }
Glauber Costa55007d82012-12-18 14:22:38 -0800221 mutex_unlock(&slab_mutex);
222 return ret;
223}
Tejun Heo657dc2f2017-02-22 15:41:14 -0800224
Tejun Heo510ded32017-02-22 15:41:24 -0800225void memcg_link_cache(struct kmem_cache *s)
Tejun Heo657dc2f2017-02-22 15:41:14 -0800226{
Tejun Heo510ded32017-02-22 15:41:24 -0800227 if (is_root_cache(s)) {
228 list_add(&s->root_caches_node, &slab_root_caches);
229 } else {
230 list_add(&s->memcg_params.children_node,
231 &s->memcg_params.root_cache->memcg_params.children);
232 list_add(&s->memcg_params.kmem_caches_node,
233 &s->memcg_params.memcg->kmem_caches);
234 }
235}
236
237static void memcg_unlink_cache(struct kmem_cache *s)
238{
239 if (is_root_cache(s)) {
240 list_del(&s->root_caches_node);
241 } else {
242 list_del(&s->memcg_params.children_node);
243 list_del(&s->memcg_params.kmem_caches_node);
244 }
Tejun Heo657dc2f2017-02-22 15:41:14 -0800245}
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700246#else
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800247static inline int init_memcg_params(struct kmem_cache *s,
248 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700249{
250 return 0;
251}
252
Vladimir Davydovf7ce3192015-02-12 14:59:20 -0800253static inline void destroy_memcg_params(struct kmem_cache *s)
Vladimir Davydov33a690c2014-10-09 15:28:43 -0700254{
255}
Tejun Heo657dc2f2017-02-22 15:41:14 -0800256
Tejun Heo510ded32017-02-22 15:41:24 -0800257static inline void memcg_unlink_cache(struct kmem_cache *s)
Tejun Heo657dc2f2017-02-22 15:41:14 -0800258{
259}
Johannes Weiner127424c2016-01-20 15:02:32 -0800260#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
Glauber Costa55007d82012-12-18 14:22:38 -0800261
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
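
/*
 * For example, with SLAB_HWCACHE_ALIGN and a 64-byte cache line, a
 * 20-byte object only gets 32-byte alignment (the line size is halved
 * while the object still fits in half of it), whereas a 100-byte
 * object gets the full 64-byte alignment.
 */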

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
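
/*
 * Typical usage (sketch): create a cache for a driver-private struct,
 * allocate from it, and tear it down on module exit:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 *
 * "struct foo" and "foo_cache" are placeholder names.
 */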

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_DESTROY_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
	 * through RCU and the associated kmem_caches are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished.  As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	memcg_unlink_cache(s);
	list_del(&s->list);

	if (s->flags & SLAB_DESTROY_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags & CACHE_CREATE_MASK,
			 root_cache->ctor, memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_deactivate_workfn(struct work_struct *work)
{
	struct kmem_cache *s = container_of(work, struct kmem_cache,
					    memcg_params.deact_work);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->memcg_params.deact_fn(s);

	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	/* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
	css_put(&s->memcg_params.memcg->css);
}

static void kmemcg_deactivate_rcufn(struct rcu_head *head)
{
	struct kmem_cache *s = container_of(head, struct kmem_cache,
					    memcg_params.deact_rcu_head);

	/*
	 * We need to grab blocking locks.  Bounce to ->deact_work.  The
	 * work item shares the space with the RCU head and can't be
	 * initialized earlier.
	 */
	INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
	queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
}

/**
 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
 * sched RCU grace period
 * @s: target kmem_cache
 * @deact_fn: deactivation function to call
 *
 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
 * held after a sched RCU grace period.  The slab is guaranteed to stay
 * alive until @deact_fn is finished.  This is to be used from
 * __kmemcg_cache_deactivate().
 */
void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
					   void (*deact_fn)(struct kmem_cache *))
{
	if (WARN_ON_ONCE(is_root_cache(s)) ||
	    WARN_ON_ONCE(s->memcg_params.deact_fn))
		return;

	/* pin memcg so that @s doesn't get destroyed in the middle */
	css_get(&s->memcg_params.memcg->css);

	s->memcg_params.deact_fn = deact_fn;
	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmemcg_cache_deactivate(c);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
				 memcg_params.kmem_caches_node) {
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(shutdown_cache(s));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int shutdown_memcg_caches(struct kmem_cache *s)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (shutdown_cache(c))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.children_node, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
				 memcg_params.children_node)
		shutdown_cache(c);

	list_splice(&busy, &s->memcg_params.children);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.children))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	kasan_cache_destroy(s);
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s);
	if (!err)
		err = shutdown_cache(s);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
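
/*
 * Worked example: kmalloc(24, GFP_KERNEL) takes the table path above,
 * index = size_index[(24 - 1) / 8] = size_index[2] = 5, i.e. the
 * 32-byte cache.  kmalloc(300, ...) takes the fls() path,
 * fls(299) = 9, i.e. the 512-byte cache.
 */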

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	{NULL,                      0},		{"kmalloc-96",             96},
	{"kmalloc-192",           192},		{"kmalloc-8",               8},
	{"kmalloc-16",             16},		{"kmalloc-32",             32},
	{"kmalloc-64",             64},		{"kmalloc-128",           128},
	{"kmalloc-256",           256},		{"kmalloc-512",           512},
	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc caches
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not a power of two in size.
		 * These have to be created immediately after the
		 * earlier power of two caches.
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
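/*
 * For instance, with 4K pages an allocation of 70000 bytes that reaches
 * this path uses get_order(70000) = 5, i.e. a 32-page compound page.
 */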
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       size_t count)
{
	size_t i;
	unsigned int rand;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}
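
/*
 * E.g. for count == 4 the list starts as { 0, 1, 2, 3 } and each
 * position i, from the back down to 1, is swapped with a randomly
 * chosen position in [0, i].
 */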

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_root_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_root_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);

	if (p == slab_root_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	mutex_lock(&slab_mutex);
	return seq_list_start(&memcg->kmem_caches, *pos);
}

void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	return seq_list_next(p, &memcg->kmem_caches, pos);
}

void memcg_slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache,
					  memcg_params.kmem_caches_node);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == memcg->kmem_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
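
/*
 * Example (sketch): growing a buffer safely.  krealloc() does not free
 * @p on failure, so the old pointer must be kept to avoid a leak:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	buf = new;
 *
 * "buf", "new" and "new_len" are placeholder names.
 */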

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);