#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
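
/*
 * For illustration only (not part of the original header): when the size
 * argument is a compile-time constant, the CACHE() chain in kmalloc()
 * below folds down to a direct pick from malloc_sizes[], while a
 * non-constant size falls back to __kmalloc(). A minimal sketch, using a
 * hypothetical kmalloc_example() helper:
 */
#if 0	/* illustrative sketch only */
static int kmalloc_example(size_t len)
{
	void *p = kmalloc(24, GFP_KERNEL);	/* constant size: general
						 * cache chosen at compile
						 * time */
	void *q = kmalloc(len, GFP_KERNEL);	/* runtime size: handled by
						 * __kmalloc() */

	if (!p || !q) {
		kfree(p);			/* kfree(NULL) is a no-op */
		kfree(q);
		return -ENOMEM;
	}
	kfree(q);
	kfree(p);
	return 0;
}
#endif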

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, in which case they are
 * BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible, do not set this for CONFIG_DEBUG_SLAB kernels, as it
 * disables some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
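
/*
 * For illustration only: ARCH_DMA_MINALIGN is normally supplied by the
 * architecture's <asm/cache.h>, not by this header. On ARM, for
 * instance, it is defined as L1_CACHE_BYTES so that DMA into a kmalloc
 * buffer never shares a cache line with unrelated data; exact arch
 * details vary.
 */
#if 0	/* sketch of an arch-side definition, not part of this header */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#endif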

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to
	 * size this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init()).
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[].
	 */
};
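
/*
 * For illustration only: a kmem_cache is normally obtained through
 * kmem_cache_create() and used via kmem_cache_alloc()/kmem_cache_free().
 * A minimal lifecycle sketch around the structure above; "struct foo"
 * and the helpers are hypothetical.
 */
#if 0	/* illustrative sketch only */
static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (f)
		kmem_cache_free(foo_cachep, f);
}
#endif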

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
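
/*
 * For illustration only: mm/slab.c builds malloc_sizes[] by expanding
 * the CACHE(x) entries of <linux/kmalloc_sizes.h>, one ascending size
 * per slot; the exact set of sizes depends on PAGE_SIZE and
 * L1_CACHE_BYTES. Roughly as sketched below.
 */
#if 0	/* sketch of the generated table, not the exact initializer */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)	/* terminator */
#undef CACHE
};
#endif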

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
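
/*
 * For illustration only: for a constant size of, say, 100 bytes, the
 * expanded CACHE() chain above is equivalent to the sketch below, which
 * the compiler folds to a single malloc_sizes[] index. The sizes shown
 * are examples; the real list comes from <linux/kmalloc_sizes.h>.
 */
#if 0	/* illustrative expansion for size == 100 */
	if (size <= 32) goto found; else i++;	/* i becomes 1 */
	if (size <= 64) goto found; else i++;	/* i becomes 2 */
	if (size <= 96) goto found; else i++;	/* i becomes 3 */
	if (size <= 128) goto found;		/* hit: i == 3 */
#endif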

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
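
/*
 * For illustration only: kmalloc_node() is used when the caller knows
 * which NUMA node should back the allocation, e.g. placing per-device
 * data near the device's node. The alloc_near() helper below is
 * hypothetical.
 */
#if 0	/* illustrative sketch only */
static void *alloc_near(struct device *dev, size_t size)
{
	return kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
}
#endif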

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */