#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
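
/*
 * Illustrative example (not part of this header): a call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * passes a compile-time constant size, so the inline kmalloc() below can
 * evaluate kmalloc_index(64) at compile time and allocate directly from
 * the matching general cache instead of taking the out-of-line
 * __kmalloc() path.  "buf" is a hypothetical variable used only for
 * illustration.
 */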

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
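
/*
 * Sketch of typical kmem_cache usage (illustrative only; the creation and
 * free APIs are declared in <linux/slab.h>, not in this header):
 *
 *	struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
 *				     0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 *
 * "my_cache", "struct my_obj" and "obj" are hypothetical names.
 */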

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
			return NULL;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
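
/*
 * Illustrative only: with CONFIG_ZONE_DMA enabled, a constant-size call
 * such as
 *
 *	buf = kmalloc(128, GFP_KERNEL | GFP_DMA);
 *
 * is served from kmalloc_dma_caches[] rather than kmalloc_caches[], while
 * a size that is not a compile-time constant always falls back to the
 * out-of-line __kmalloc().  "buf" is a hypothetical variable.
 */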

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
			return NULL;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
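
/*
 * Illustrative only: kmalloc_node() mirrors kmalloc() but allocates the
 * object from a specific NUMA node, e.g.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *
 * "data" and "cpu" are hypothetical; cpu_to_node() is the usual helper
 * for mapping a CPU number to its NUMA node.
 */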

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */