/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
                    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
                         SLAB_NOTRACK | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
        slab_nomerge = true;
        return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
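
/*
 * Example (illustrative): merging can be disabled from the kernel
 * command line by booting with
 *
 *      slab_nomerge
 *
 * "slub_nomerge" is kept as an alias when CONFIG_SLUB is enabled, for
 * backwards compatibility.
 */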

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
        return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
        struct kmem_cache *s = NULL;

        if (!name || in_interrupt() || size < sizeof(void *) ||
                size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }

        list_for_each_entry(s, &slab_caches, list) {
                char tmp;
                int res;

                /*
                 * This happens when the module gets unloaded and doesn't
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module.  Print a warning.
                 */
                res = probe_kernel_address(s->name, tmp);
                if (res) {
                        pr_err("Slab cache with size %d has lost its name\n",
                               s->object_size);
                        continue;
                }
        }

        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
        return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
        return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                if (s)
                        kmem_cache_free(s, p[i]);
                else
                        kfree(p[i]);
        }
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                                                                void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                void *x = p[i] = kmem_cache_alloc(s, flags);
                if (!x) {
                        __kmem_cache_free_bulk(s, i, p);
                        return 0;
                }
        }
        return i;
}
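
/*
 * Example (illustrative; "cachep" and "objs" are hypothetical): callers
 * use the public kmem_cache_alloc_bulk()/kmem_cache_free_bulk() wrappers,
 * which fall back to the generic loops above when the allocator provides
 * no fast path:
 *
 *      void *objs[16];
 *
 *      if (kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *              ...
 *              kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
 *      }
 *
 * The allocation is all or nothing: a zero return means nothing was
 * allocated and no cleanup is needed.
 */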

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

LIST_HEAD(slab_root_caches);

void slab_init_memcg_params(struct kmem_cache *s)
{
        s->memcg_params.root_cache = NULL;
        RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
        INIT_LIST_HEAD(&s->memcg_params.children);
}

static int init_memcg_params(struct kmem_cache *s,
                struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
        struct memcg_cache_array *arr;

        if (root_cache) {
                s->memcg_params.root_cache = root_cache;
                s->memcg_params.memcg = memcg;
                INIT_LIST_HEAD(&s->memcg_params.children_node);
                INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
                return 0;
        }

        slab_init_memcg_params(s);

        if (!memcg_nr_cache_ids)
                return 0;

        arr = kzalloc(sizeof(struct memcg_cache_array) +
                      memcg_nr_cache_ids * sizeof(void *),
                      GFP_KERNEL);
        if (!arr)
                return -ENOMEM;

        RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
        return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
        if (is_root_cache(s))
                kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
        struct memcg_cache_array *old, *new;

        new = kzalloc(sizeof(struct memcg_cache_array) +
                      new_array_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        old = rcu_dereference_protected(s->memcg_params.memcg_caches,
                                        lockdep_is_held(&slab_mutex));
        if (old)
                memcpy(new->entries, old->entries,
                       memcg_nr_cache_ids * sizeof(void *));

        rcu_assign_pointer(s->memcg_params.memcg_caches, new);
        if (old)
                kfree_rcu(old, rcu);
        return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
        struct kmem_cache *s;
        int ret = 0;

        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_root_caches, root_caches_node) {
                ret = update_memcg_params(s, num_memcgs);
                /*
                 * Instead of freeing the memory, we'll just leave the caches
                 * up to this point in an updated state.
                 */
                if (ret)
                        break;
        }
        mutex_unlock(&slab_mutex);
        return ret;
}

void memcg_link_cache(struct kmem_cache *s)
{
        if (is_root_cache(s)) {
                list_add(&s->root_caches_node, &slab_root_caches);
        } else {
                list_add(&s->memcg_params.children_node,
                         &s->memcg_params.root_cache->memcg_params.children);
                list_add(&s->memcg_params.kmem_caches_node,
                         &s->memcg_params.memcg->kmem_caches);
        }
}

static void memcg_unlink_cache(struct kmem_cache *s)
{
        if (is_root_cache(s)) {
                list_del(&s->root_caches_node);
        } else {
                list_del(&s->memcg_params.children_node);
                list_del(&s->memcg_params.kmem_caches_node);
        }
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
                struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
        return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_unlink_cache(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
        if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
                return 1;

        if (!is_root_cache(s))
                return 1;

        if (s->ctor)
                return 1;

        /*
         * We may have set a slab to be unmergeable during bootstrap.
         */
        if (s->refcount < 0)
                return 1;

        return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
                unsigned long flags, const char *name, void (*ctor)(void *))
{
        struct kmem_cache *s;

        if (slab_nomerge)
                return NULL;

        if (ctor)
                return NULL;

        size = ALIGN(size, sizeof(void *));
        align = calculate_alignment(flags, align, size);
        size = ALIGN(size, align);
        flags = kmem_cache_flags(size, flags, name, NULL);

        if (flags & SLAB_NEVER_MERGE)
                return NULL;

        list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
                if (slab_unmergeable(s))
                        continue;

                if (size > s->size)
                        continue;

                if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
                        continue;
                /*
                 * Check if alignment is compatible.
                 * Courtesy of Adrian Drzewiecki
                 */
                if ((s->size & ~(align - 1)) != s->size)
                        continue;

                if (s->size - size >= sizeof(void *))
                        continue;

                if (IS_ENABLED(CONFIG_SLAB) && align &&
                        (align > s->align || s->align % align))
                        continue;

                return s;
        }
        return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size)
{
        /*
         * If the user wants hardware cache aligned objects then follow that
         * suggestion if the object is sufficiently large.
         *
         * The hardware cache alignment cannot override the specified
         * alignment though. If that is greater, then use it.
         */
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned long ralign = cache_line_size();
                while (size <= ralign / 2)
                        ralign /= 2;
                align = max(align, ralign);
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}

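/*
 * Worked example (illustrative, assuming a 64-byte cache line and an
 * ARCH_SLAB_MINALIGN of 8): for a 24-byte object created with
 * SLAB_HWCACHE_ALIGN and no explicit alignment, ralign starts at 64 and
 * is halved while size <= ralign / 2 (64 -> 32, then 24 > 16 stops the
 * loop), so the object ends up 32-byte aligned instead of wasting most
 * of a cache line.
 */
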
static struct kmem_cache *create_cache(const char *name,
                size_t object_size, size_t size, size_t align,
                unsigned long flags, void (*ctor)(void *),
                struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
        struct kmem_cache *s;
        int err;

        err = -ENOMEM;
        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (!s)
                goto out;

        s->name = name;
        s->object_size = object_size;
        s->size = size;
        s->align = align;
        s->ctor = ctor;

        err = init_memcg_params(s, memcg, root_cache);
        if (err)
                goto out_free_cache;

        err = __kmem_cache_create(s, flags);
        if (err)
                goto out_free_cache;

        s->refcount = 1;
        list_add(&s->list, &slab_caches);
        memcg_link_cache(s);
out:
        if (err)
                return ERR_PTR(err);
        return s;

out_free_cache:
        destroy_memcg_params(s);
        kmem_cache_free(kmem_cache, s);
        goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
                  unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s = NULL;
        const char *cache_name;
        int err;

        get_online_cpus();
        get_online_mems();
        memcg_get_cache_ids();

        mutex_lock(&slab_mutex);

        err = kmem_cache_sanity_check(name, size);
        if (err) {
                goto out_unlock;
        }

        /* Refuse requests with allocator specific flags */
        if (flags & ~SLAB_FLAGS_PERMITTED) {
                err = -EINVAL;
                goto out_unlock;
        }

        /*
         * Some allocators will constrain the set of valid flags to a subset
         * of all flags. We expect them to define CACHE_CREATE_MASK in this
         * case, and we'll just provide them with a sanitized version of the
         * passed flags.
         */
        flags &= CACHE_CREATE_MASK;

        s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                goto out_unlock;

        cache_name = kstrdup_const(name, GFP_KERNEL);
        if (!cache_name) {
                err = -ENOMEM;
                goto out_unlock;
        }

        s = create_cache(cache_name, size, size,
                         calculate_alignment(flags, align, size),
                         flags, ctor, NULL, NULL);
        if (IS_ERR(s)) {
                err = PTR_ERR(s);
                kfree_const(cache_name);
        }

out_unlock:
        mutex_unlock(&slab_mutex);

        memcg_put_cache_ids();
        put_online_mems();
        put_online_cpus();

        if (err) {
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                name, err);
                else {
                        pr_warn("kmem_cache_create(%s) failed with error %d\n",
                                name, err);
                        dump_stack();
                }
                return NULL;
        }
        return s;
}
EXPORT_SYMBOL(kmem_cache_create);
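
/*
 * Example (illustrative; struct foo and foo_cache are hypothetical): a
 * typical user creates a cache once at init time and destroys it when
 * none of its objects can be in use any more:
 *
 *      static struct kmem_cache *foo_cache;
 *
 *      foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                    SLAB_HWCACHE_ALIGN, NULL);
 *      if (!foo_cache)
 *              return -ENOMEM;
 *
 *      struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(foo_cache, f);
 *      kmem_cache_destroy(foo_cache);
 */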

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
        LIST_HEAD(to_destroy);
        struct kmem_cache *s, *s2;

        /*
         * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
         * @slab_caches_to_rcu_destroy list.  The slab pages are freed
         * through RCU and the associated kmem_cache is dereferenced
         * while freeing the pages, so the kmem_caches should be freed only
         * after the pending RCU operations are finished.  As rcu_barrier()
         * is a pretty slow operation, we batch all pending destructions
         * asynchronously.
         */
        mutex_lock(&slab_mutex);
        list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
        mutex_unlock(&slab_mutex);

        if (list_empty(&to_destroy))
                return;

        rcu_barrier();

        list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
                sysfs_slab_release(s);
#else
                slab_kmem_cache_release(s);
#endif
        }
}

static int shutdown_cache(struct kmem_cache *s)
{
        /* free asan quarantined objects */
        kasan_cache_shutdown(s);

        if (__kmem_cache_shutdown(s) != 0)
                return -EBUSY;

        memcg_unlink_cache(s);
        list_del(&s->list);

        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
#ifdef SLAB_SUPPORTS_SYSFS
                sysfs_slab_release(s);
#else
                slab_kmem_cache_release(s);
#endif
        }

        return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                             struct kmem_cache *root_cache)
{
        static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
        struct cgroup_subsys_state *css = &memcg->css;
        struct memcg_cache_array *arr;
        struct kmem_cache *s = NULL;
        char *cache_name;
        int idx;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        /*
         * The memory cgroup could have been offlined while the cache
         * creation work was pending.
         */
        if (memcg->kmem_state != KMEM_ONLINE)
                goto out_unlock;

        idx = memcg_cache_id(memcg);
        arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
                                        lockdep_is_held(&slab_mutex));

        /*
         * Since per-memcg caches are created asynchronously on first
         * allocation (see memcg_kmem_get_cache()), several threads can try to
         * create the same cache, but only one of them may succeed.
         */
        if (arr->entries[idx])
                goto out_unlock;

        cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
        cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
                               css->serial_nr, memcg_name_buf);
        if (!cache_name)
                goto out_unlock;

        s = create_cache(cache_name, root_cache->object_size,
                         root_cache->size, root_cache->align,
                         root_cache->flags & CACHE_CREATE_MASK,
                         root_cache->ctor, memcg, root_cache);
        /*
         * If we could not create a memcg cache, do not complain, because
         * that's not critical at all as we can always proceed with the root
         * cache.
         */
        if (IS_ERR(s)) {
                kfree(cache_name);
                goto out_unlock;
        }

        /*
         * Since readers won't lock (see cache_from_memcg_idx()), we need a
         * barrier here to ensure nobody will see the kmem_cache partially
         * initialized.
         */
        smp_wmb();
        arr->entries[idx] = s;

out_unlock:
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();
}

static void kmemcg_deactivate_workfn(struct work_struct *work)
{
        struct kmem_cache *s = container_of(work, struct kmem_cache,
                                            memcg_params.deact_work);

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        s->memcg_params.deact_fn(s);

        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();

        /* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
        css_put(&s->memcg_params.memcg->css);
}

static void kmemcg_deactivate_rcufn(struct rcu_head *head)
{
        struct kmem_cache *s = container_of(head, struct kmem_cache,
                                            memcg_params.deact_rcu_head);

        /*
         * We need to grab blocking locks.  Bounce to ->deact_work.  The
         * work item shares the space with the RCU head and can't be
         * initialized earlier.
         */
        INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
        queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
}

/**
 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
 * sched RCU grace period
 * @s: target kmem_cache
 * @deact_fn: deactivation function to call
 *
 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
 * held after a sched RCU grace period.  The slab is guaranteed to stay
 * alive until @deact_fn is finished.  This is to be used from
 * __kmemcg_cache_deactivate().
 */
void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
                                           void (*deact_fn)(struct kmem_cache *))
{
        if (WARN_ON_ONCE(is_root_cache(s)) ||
            WARN_ON_ONCE(s->memcg_params.deact_fn))
                return;

        /* pin memcg so that @s doesn't get destroyed in the middle */
        css_get(&s->memcg_params.memcg->css);

        s->memcg_params.deact_fn = deact_fn;
        call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
        int idx;
        struct memcg_cache_array *arr;
        struct kmem_cache *s, *c;

        idx = memcg_cache_id(memcg);

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_root_caches, root_caches_node) {
                arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
                                                lockdep_is_held(&slab_mutex));
                c = arr->entries[idx];
                if (!c)
                        continue;

                __kmemcg_cache_deactivate(c);
                arr->entries[idx] = NULL;
        }
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
        struct kmem_cache *s, *s2;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);
        list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
                                 memcg_params.kmem_caches_node) {
                /*
                 * The cgroup is about to be freed and therefore has no charges
                 * left. Hence, all its caches must be empty by now.
                 */
                BUG_ON(shutdown_cache(s));
        }
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();
}

static int shutdown_memcg_caches(struct kmem_cache *s)
{
        struct memcg_cache_array *arr;
        struct kmem_cache *c, *c2;
        LIST_HEAD(busy);
        int i;

        BUG_ON(!is_root_cache(s));

        /*
         * First, shutdown active caches, i.e. caches that belong to online
         * memory cgroups.
         */
        arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
                                        lockdep_is_held(&slab_mutex));
        for_each_memcg_cache_index(i) {
                c = arr->entries[i];
                if (!c)
                        continue;
                if (shutdown_cache(c))
                        /*
                         * The cache still has objects. Move it to a temporary
                         * list so as not to try to destroy it for a second
                         * time while iterating over inactive caches below.
                         */
                        list_move(&c->memcg_params.children_node, &busy);
                else
                        /*
                         * The cache is empty and will be destroyed soon. Clear
                         * the pointer to it in the memcg_caches array so that
                         * it will never be accessed even if the root cache
                         * stays alive.
                         */
                        arr->entries[i] = NULL;
        }

        /*
         * Second, shutdown all caches left from memory cgroups that are now
         * offline.
         */
        list_for_each_entry_safe(c, c2, &s->memcg_params.children,
                                 memcg_params.children_node)
                shutdown_cache(c);

        list_splice(&busy, &s->memcg_params.children);

        /*
         * A cache being destroyed must be empty. In particular, this means
         * that all per memcg caches attached to it must be empty too.
         */
        if (!list_empty(&s->memcg_params.children))
                return -EBUSY;
        return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
        return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
        __kmem_cache_release(s);
        destroy_memcg_params(s);
        kfree_const(s->name);
        kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
        int err;

        if (unlikely(!s))
                return;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        s->refcount--;
        if (s->refcount)
                goto out_unlock;

        err = shutdown_memcg_caches(s);
        if (!err)
                err = shutdown_cache(s);

        if (err) {
                pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
                       s->name);
                dump_stack();
        }
out_unlock:
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
        int ret;

        get_online_cpus();
        get_online_mems();
        kasan_cache_shrink(cachep);
        ret = __kmem_cache_shrink(cachep);
        put_online_mems();
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
        return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
                unsigned long flags)
{
        int err;

        s->name = name;
        s->size = s->object_size = size;
        s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

        slab_init_memcg_params(s);

        err = __kmem_cache_create(s, flags);

        if (err)
                panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
                        name, size, err);

        s->refcount = -1;       /* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
                                unsigned long flags)
{
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        if (!s)
                panic("Out of memory when creating slab %s\n", name);

        create_boot_cache(s, name, size, flags);
        list_add(&s->list, &slab_caches);
        memcg_link_cache(s);
        s->refcount = 1;
        return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
        3,      /* 8 */
        4,      /* 16 */
        5,      /* 24 */
        5,      /* 32 */
        6,      /* 40 */
        6,      /* 48 */
        6,      /* 56 */
        6,      /* 64 */
        1,      /* 72 */
        1,      /* 80 */
        1,      /* 88 */
        1,      /* 96 */
        7,      /* 104 */
        7,      /* 112 */
        7,      /* 120 */
        7,      /* 128 */
        2,      /* 136 */
        2,      /* 144 */
        2,      /* 152 */
        2,      /* 160 */
        2,      /* 168 */
        2,      /* 176 */
        2,      /* 184 */
        2       /* 192 */
};

static inline int size_index_elem(size_t bytes)
{
        return (bytes - 1) / 8;
}
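
/*
 * Worked example (illustrative): a 100-byte request gives
 * size_index_elem(100) = (100 - 1) / 8 = 12 and size_index[12] = 7, so
 * it is served from kmalloc_caches[7], the 128-byte cache.  A 72-byte
 * request maps to elem 8 and index 1, the 96-byte cache, which is why
 * the non-power-of-two sizes sit at indices 1 and 2.
 */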

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
        int index;

        if (unlikely(size > KMALLOC_MAX_SIZE)) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }

        if (size <= 192) {
                if (!size)
                        return ZERO_SIZE_PTR;

                index = size_index[size_index_elem(size)];
        } else
                index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
        if (unlikely((flags & GFP_DMA)))
                return kmalloc_dma_caches[index];

#endif
        return kmalloc_caches[index];
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
        {NULL, 0},                      {"kmalloc-96", 96},
        {"kmalloc-192", 192},           {"kmalloc-8", 8},
        {"kmalloc-16", 16},             {"kmalloc-32", 32},
        {"kmalloc-64", 64},             {"kmalloc-128", 128},
        {"kmalloc-256", 256},           {"kmalloc-512", 512},
        {"kmalloc-1024", 1024},         {"kmalloc-2048", 2048},
        {"kmalloc-4096", 4096},         {"kmalloc-8192", 8192},
        {"kmalloc-16384", 16384},       {"kmalloc-32768", 32768},
        {"kmalloc-65536", 65536},       {"kmalloc-131072", 131072},
        {"kmalloc-262144", 262144},     {"kmalloc-524288", 524288},
        {"kmalloc-1048576", 1048576},   {"kmalloc-2097152", 2097152},
        {"kmalloc-4194304", 4194304},   {"kmalloc-8388608", 8388608},
        {"kmalloc-16777216", 16777216}, {"kmalloc-33554432", 33554432},
        {"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
        int i;

        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
                int elem = size_index_elem(i);

                if (elem >= ARRAY_SIZE(size_index))
                        break;
                size_index[elem] = KMALLOC_SHIFT_LOW;
        }

        if (KMALLOC_MIN_SIZE >= 64) {
                /*
                 * The 96 byte size cache is not used if the alignment
                 * is 64 byte.
                 */
                for (i = 64 + 8; i <= 96; i += 8)
                        size_index[size_index_elem(i)] = 7;

        }

        if (KMALLOC_MIN_SIZE >= 128) {
                /*
                 * The 192 byte sized cache is not used if the alignment
                 * is 128 byte. Redirect kmalloc to use the 256 byte cache
                 * instead.
                 */
                for (i = 128 + 8; i <= 192; i += 8)
                        size_index[size_index_elem(i)] = 8;
        }
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
        kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
                                        kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
        int i;

        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                if (!kmalloc_caches[i])
                        new_kmalloc_cache(i, flags);

                /*
                 * Caches that are not of the two-to-the-power-of size.
                 * These have to be created immediately after the
                 * earlier power of two caches
                 */
                if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
                        new_kmalloc_cache(1, flags);
                if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
                        new_kmalloc_cache(2, flags);
        }

        /* Kmalloc array is now usable */
        slab_state = UP;

#ifdef CONFIG_ZONE_DMA
        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
                struct kmem_cache *s = kmalloc_caches[i];

                if (s) {
                        int size = kmalloc_size(i);
                        char *n = kasprintf(GFP_NOWAIT,
                                 "dma-kmalloc-%d", size);

                        BUG_ON(!n);
                        kmalloc_dma_caches[i] = create_kmalloc_cache(n,
                                size, SLAB_CACHE_DMA | flags);
                }
        }
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
        void *ret;
        struct page *page;

        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
        ret = page ? page_address(page) : NULL;
        kmemleak_alloc(ret, size, 1, flags);
        kasan_kmalloc_large(ret, size, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        void *ret = kmalloc_order(size, flags, order);
        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
                        size_t count)
{
        size_t i;
        unsigned int rand;

        for (i = 0; i < count; i++)
                list[i] = i;

        /* Fisher-Yates shuffle */
        for (i = count - 1; i > 0; i--) {
                rand = prandom_u32_state(state);
                rand %= (i + 1);
                swap(list[i], list[rand]);
        }
}
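
/*
 * Example (illustrative): for count == 4 the list starts as {0, 1, 2, 3}
 * and each step swaps slot i with a randomly chosen slot in [0, i], so
 * any of the 4! = 24 permutations, e.g. {2, 0, 3, 1}, can come out with
 * near-equal probability (exact uniformity is only approximate because
 * of the modulo bias in rand % (i + 1)).
 */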

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp)
{
        struct rnd_state state;

        if (count < 2 || cachep->random_seq)
                return 0;

        cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
        if (!cachep->random_seq)
                return -ENOMEM;

        /* Get best entropy at this stage of boot */
        prandom_seed_state(&state, get_random_long());

        freelist_randomize(&state, cachep->random_seq, count);
        return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
        kfree(cachep->random_seq);
        cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
        /*
         * Output format version, so at least we can change it
         * without _too_ many complaints.
         */
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
        seq_puts(m, "slabinfo - version: 2.1\n");
#endif
        seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
        seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&slab_mutex);
        return seq_list_start(&slab_root_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &slab_root_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
        struct kmem_cache *c;
        struct slabinfo sinfo;

        if (!is_root_cache(s))
                return;

        for_each_memcg_cache(c, s) {
                memset(&sinfo, 0, sizeof(sinfo));
                get_slabinfo(c, &sinfo);

                info->active_slabs += sinfo.active_slabs;
                info->num_slabs += sinfo.num_slabs;
                info->shared_avail += sinfo.shared_avail;
                info->active_objs += sinfo.active_objs;
                info->num_objs += sinfo.num_objs;
        }
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
        struct slabinfo sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);

        memcg_accumulate_slabinfo(s, &sinfo);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));

        seq_printf(m, " : tunables %4u %4u %4u",
                   sinfo.limit, sinfo.batchcount, sinfo.shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
                   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
        slabinfo_show_stats(m, s);
        seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
        struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);

        if (p == slab_root_caches.next)
                print_slabinfo_header(m);
        cache_show(s, m);
        return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

        mutex_lock(&slab_mutex);
        return seq_list_start(&memcg->kmem_caches, *pos);
}

void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

        return seq_list_next(p, &memcg->kmem_caches, pos);
}

void memcg_slab_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

int memcg_slab_show(struct seq_file *m, void *p)
{
        struct kmem_cache *s = list_entry(p, struct kmem_cache,
                                          memcg_params.kmem_caches_node);
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

        if (p == memcg->kmem_caches.next)
                print_slabinfo_header(m);
        cache_show(s, m);
        return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
        .start = slab_start,
        .next = slab_next,
        .stop = slab_stop,
        .show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
        .open           = slabinfo_open,
        .read           = seq_read,
        .write          = slabinfo_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init slab_proc_init(void)
{
        proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
                    &proc_slabinfo_operations);
        return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
{
        void *ret;
        size_t ks = 0;

        if (p)
                ks = ksize(p);

        if (ks >= new_size) {
                kasan_krealloc((void *)p, new_size, flags);
                return (void *)p;
        }

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
        if (unlikely(!new_size))
                return ZERO_SIZE_PTR;

        return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __do_krealloc(p, new_size, flags);
        if (ret && p != ret)
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);
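
/*
 * Example (illustrative; "buf" and "new_len" are hypothetical): on
 * failure krealloc() returns NULL but leaves the old allocation intact,
 * so the result must be checked before overwriting the old pointer:
 *
 *      char *new = krealloc(buf, new_len, GFP_KERNEL);
 *
 *      if (!new)
 *              goto err_free_buf;      (buf is still valid here)
 *      buf = new;
 */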

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before being freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
        memset(mem, 0, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kzfree);
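
/*
 * Example (illustrative; "key" and "key_len" are hypothetical): kzfree()
 * is meant for buffers that held sensitive data such as key material:
 *
 *      u8 *key = kmalloc(key_len, GFP_KERNEL);
 *      ...
 *      kzfree(key);    (zeroes all ksize(key) bytes, then frees)
 */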

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);