#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protects partial list and nr_partial */
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	struct list_head full;
#endif
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	int size;		/* The size of an object including metadata */
	int objsize;		/* The size of an object without metadata */
	int offset;		/* Free pointer offset */
	int order;		/* Allocation order of the slab pages */

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in a slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
	struct page *cpu_slab[NR_CPUS];
};
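
/*
 * Illustrative use of such a cache (a sketch, not part of this header:
 * kmem_cache_create() and kmem_cache_free() are declared in
 * <linux/slab.h>, and my_cache / struct my_obj are hypothetical names):
 *
 *	struct my_obj *p = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, p);
 */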

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

/*
 * Sorry that the following has to be that ugly, but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size > KMALLOC_MAX_SIZE)
		return -1;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
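
/*
 * Illustrative mapping (assuming KMALLOC_MIN_SIZE == 8, so that
 * KMALLOC_SHIFT_LOW == 3):
 *
 *	kmalloc_index(8)   == 3		smallest general cache
 *	kmalloc_index(96)  == 1		the special 96-byte cache
 *	kmalloc_index(100) == 7		rounds up to the 128-byte cache
 *	kmalloc_index(192) == 2		the special 192-byte cache
 *	kmalloc_index(0)   == 0		no cache; callers map this to
 *					ZERO_SIZE_PTR
 */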

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	/*
	 * This function only gets expanded if __builtin_constant_p(size), so
	 * testing it here shouldn't be needed. But some versions of gcc need
	 * help.
	 */
	if (__builtin_constant_p(size) && index < 0) {
		/*
		 * Generate a link failure. Would be great if we could
		 * do something to stop the compile here.
		 */
		extern void __kmalloc_size_too_large(void);
		__kmalloc_size_too_large();
	}
	return &kmalloc_caches[index];
}
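
/*
 * Illustrative failure mode (a sketch): for a constant size beyond
 * KMALLOC_MAX_SIZE, kmalloc_index() returns -1, the branch above is kept,
 * and the undefined reference to __kmalloc_size_too_large() makes the
 * build fail at link time rather than silently indexing kmalloc_caches
 * out of bounds.
 */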

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc(s, flags);
	} else
		return __kmalloc(size, flags);
}
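
/*
 * Illustrative call site (hypothetical): with a constant size the lookup
 * above folds away at compile time, so e.g.
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * becomes kmem_cache_alloc(&kmalloc_caches[6], GFP_KERNEL), while a
 * runtime-sized request goes through __kmalloc(size, flags) instead.
 */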

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	} else
		return __kmalloc_node(size, flags, node);
}
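
/*
 * Illustrative call site (hypothetical): kmalloc_node(size, GFP_KERNEL,
 * numa_node_id()) prefers memory on the current node; the constant-size
 * path folds to kmem_cache_alloc_node() just as kmalloc() folds to
 * kmem_cache_alloc() above.
 */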
#endif

#endif /* _LINUX_SLUB_DEF_H */