#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
	struct list_head full;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	int size;		/* The size of an object including metadata */
	int objsize;		/* The size of an object without metadata */
	int offset;		/* Free pointer offset */
	int order;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in a slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
	struct kobject kobj;	/* For sysfs */

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
	struct page *cpu_slab[NR_CPUS];
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
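
/*
 * Worked example (illustrative, not from this file): an architecture
 * that defines ARCH_KMALLOC_MINALIGN as 16 gets KMALLOC_MIN_SIZE == 16
 * and KMALLOC_SHIFT_LOW == ilog2(16) == 4, so the smallest general
 * cache serves 16-byte objects; without such a definition the minimum
 * is 8 bytes and KMALLOC_SHIFT_LOW is 3.
 */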

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size > KMALLOC_MAX_SIZE)
		return -1;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
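
/*
 * A few sample mappings, derived from the ladder above (assuming
 * KMALLOC_MIN_SIZE == 8):
 *
 *	kmalloc_index(0)   == 0		no cache, zero-sized request
 *	kmalloc_index(8)   == 3		smallest general cache
 *	kmalloc_index(96)  == 1		the special 96-byte cache
 *	kmalloc_index(100) == 7		rounded up to the 128-byte cache
 *	kmalloc_index(200) == 8		rounded up to the 256-byte cache
 */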

/*
 * Find the slab cache for a given allocation size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	/*
	 * This function only gets expanded if __builtin_constant_p(size), so
	 * testing it here shouldn't be needed. But some versions of gcc need
	 * help.
	 */
	if (__builtin_constant_p(size) && index < 0) {
		/*
		 * Generate a link failure. Would be great if we could
		 * do something to stop the compile here.
		 */
		extern void __kmalloc_size_too_large(void);
		__kmalloc_size_too_large();
	}
	return &kmalloc_caches[index];
}
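
/*
 * Illustration: with a compile-time-constant size the whole lookup
 * folds away, e.g. kmalloc_slab(64) reduces to &kmalloc_caches[6]
 * because kmalloc_index(64) == 6.
 */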

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA 0
#endif

/*
 * ZERO_SIZE_PTR will be returned for zero-sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)
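
/*
 * Example taken straight from the rules above: a zero-sized request
 * yields ZERO_SIZE_PTR rather than a real object, and handing it back
 * to kfree is a no-op:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	p == ZERO_SIZE_PTR
 *	kfree(p);				no-op, like kfree(NULL)
 */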

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc(s, flags);
	} else
		return __kmalloc(size, flags);
}
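
/*
 * Usage sketch (struct foo is hypothetical): for a constant size such
 * as
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 * gcc can resolve kmalloc_slab() at compile time, so the call becomes
 * a direct kmem_cache_alloc() on the matching general cache. Variable
 * sizes and __GFP_DMA requests take the out-of-line __kmalloc() path.
 */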

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_zalloc(s, flags);
	} else
		return __kzalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	} else
		return __kmalloc_node(size, flags, node);
}
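
/*
 * Usage sketch (hypothetical caller): allocate close to a given NUMA
 * node, here the calling CPU's node:
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, numa_node_id());
 *
 * Constant sizes get the same compile-time cache lookup as kmalloc().
 */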
#endif

#endif /* _LINUX_SLUB_DEF_H */