/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free(),
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures that the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
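
/*
 * For illustration only (the "p" and "len" names are hypothetical):
 * kmalloc(0, GFP_KERNEL) returns ZERO_SIZE_PTR rather than NULL, so a
 * NULL check still cleanly identifies real allocation failures, while
 * ZERO_OR_NULL_PTR() catches both cases in which there is nothing that
 * may be dereferenced:
 *
 *	p = kmalloc(len, GFP_KERNEL);	// len may be 0
 *	if (!p)
 *		return -ENOMEM;		// genuine out-of-memory
 *	...
 *	kfree(p);			// no-op for ZERO_SIZE_PTR and NULL
 */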

#include <linux/kmemleak.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
#ifdef CONFIG_MEMCG_KMEM
void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
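
/*
 * A minimal usage sketch of the cache API above (the "foo" structure, the
 * cache pointer and the error handling are hypothetical, for illustration
 * only):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	p = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, p);
 *	kmem_cache_destroy(foo_cachep);	// only once no objects remain
 */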

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
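
/*
 * For example (hypothetical structure name, sketch only):
 *
 *	struct foo_ctx {
 *		spinlock_t lock;
 *		struct list_head list;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_ctx_cache;
 *
 *	foo_ctx_cache = KMEM_CACHE(foo_ctx, SLAB_PANIC);
 *
 * expands to kmem_cache_create("foo_ctx", sizeof(struct foo_ctx),
 * __alignof__(struct foo_ctx), SLAB_PANIC, NULL), so the struct's own
 * alignment attribute carries over to the objects.
 */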

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
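
/*
 * A usage sketch for krealloc() (the "buf"/"new"/"newlen" names are
 * hypothetical): krealloc() may return a different pointer or NULL, and
 * on failure the original buffer is left untouched, so the old pointer
 * must not be overwritten blindly:
 *
 *	new = krealloc(buf, newlen, GFP_KERNEL);
 *	if (!new)
 *		goto err;	// buf is still valid and must still be freed
 *	buf = new;
 */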

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
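
/*
 * For instance (values implied by the mapping above, shown only as an
 * illustration): kmalloc_index(96) == 1 when KMALLOC_MIN_SIZE <= 32,
 * kmalloc_index(100) == 7, and kmalloc_index(200) == 8, i.e. a 200-byte
 * request is served from the 256-byte kmalloc cache.
 */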
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
						      flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
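
/*
 * A minimal, illustrative caller (the "buf"/"len" names and the error
 * path are hypothetical, not part of this interface):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * For zeroed memory use kzalloc(); for arrays with overflow checking use
 * kmalloc_array()/kcalloc(), both declared later in this file.
 */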

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						   flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
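
/*
 * Illustrative only (the device pointer "dev" and the "ring" object are
 * hypothetical): callers that know which NUMA node will touch the memory
 * can allocate it close to that node, e.g.
 *
 *	ring = kmalloc_node(sizeof(*ring), GFP_KERNEL, dev_to_node(dev));
 *
 * falling back to other nodes if the preferred one is out of memory.
 */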

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled is bigger than it
 * would otherwise be if this were bundled into kmem_cache: we'd need an
 * extra pointer chase. But the trade-off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
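
/*
 * Illustrative sketch (hypothetical "entries"/"nr" names): kmalloc_array()
 * and kcalloc() guard against n * size overflowing, so they are preferred
 * over an open-coded multiplication:
 *
 *	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 *	...
 *	kfree(entries);
 */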

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
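
/*
 * A sketch of the intended use (the wrapper name "my_strdup" is made up
 * for illustration): a small helper that allocates on behalf of many
 * callers can use kmalloc_track_caller() so that slab debugging reports
 * the helper's caller rather than the helper itself:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, s, len);
 *		return p;
 *	}
 *
 * This mirrors what helpers such as kstrdup() do internally.
 */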

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
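
/*
 * Illustrative equivalences only (no additional API): kzalloc(size, gfp)
 * behaves like kmalloc(size, gfp) followed by zeroing the object, and
 * kzalloc_node() is the node-aware variant, e.g.
 *
 *	state = kzalloc_node(sizeof(*state), GFP_KERNEL, numa_node_id());
 *
 * where "state" is a hypothetical per-node object.
 */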

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */