#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
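
/*
 * Illustrative example (not part of the API): a call such as
 *
 *	buf = kmalloc(128, GFP_KERNEL);
 *
 * has a compile-time constant size, so the kmalloc() inline defined
 * below collapses into a direct kmem_cache_alloc() from the matching
 * general cache, with no runtime size lookup.
 */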

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields; obj_offset below
	 * holds the offset from the start of the allocation to the user
	 * object.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[]
	 */
};

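/*
 * Illustrative sketch of the usual life cycle of a cache described by
 * struct kmem_cache; "my_cache" and struct my_obj are made-up names:
 *
 *	struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
 *				     0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 *	kmem_cache_destroy(my_cache);
 */
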
/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

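/*
 * Each CACHE(x) line in <linux/kmalloc_sizes.h> defines one slot of
 * malloc_sizes[], so the table looks roughly like this (the exact set
 * of sizes depends on PAGE_SIZE and L1_CACHE_BYTES):
 *
 *	{ .cs_size = 32,  .cs_cachep = ... },
 *	{ .cs_size = 64,  .cs_cachep = ... },
 *	{ .cs_size = 128, .cs_cachep = ... },
 *	...
 */
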
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

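		/*
		 * Each CACHE(x) entry from <linux/kmalloc_sizes.h>
		 * expands to the test below; the first general cache
		 * size that fits leaves i as the index into
		 * malloc_sizes[].
		 */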
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}

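/*
 * Illustrative only: a constant-size request takes the inline fast
 * path above, while a size known only at runtime falls through to
 * __kmalloc():
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);	constant, resolved inline
 *	q = kmalloc(len, GFP_KERNEL);		runtime size, __kmalloc()
 */
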
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
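
/*
 * Illustrative only: kmalloc_node() requests memory from the general
 * caches on a specific NUMA node, e.g. the node a device sits on:
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
 */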

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */