#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES, /* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
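
/*
 * For illustration: the counters above are incremented from the allocator
 * hot paths through a tiny helper, roughly like the stat() helper in
 * mm/slub.c (a sketch; it compiles to nothing without CONFIG_SLUB_STATS):
 *
 *	static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		c->stat[si]++;
 *	#endif
 *	}
 *
 * e.g. a hit in the allocation fast path does stat(c, ALLOC_FASTPATH).
 */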

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
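
/*
 * For illustration: mm/slub.c packs the page order into the high bits and
 * the object count into the low bits of the single word. A minimal sketch
 * of such accessors (the exact shift and mask are implementation details):
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	static inline int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 */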

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};
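
/*
 * For illustration: since cpu_slab is an array of pointers on SMP but an
 * embedded struct on UP, per-cpu state is reached through a small accessor,
 * along the lines of the get_cpu_slab() helper in mm/slub.c (sketch):
 *
 *	static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s,
 *							  int cpu)
 *	{
 *	#ifdef CONFIG_SMP
 *		return s->cpu_slab[cpu];
 *	#else
 *		return &s->cpu_slab;
 *	#endif
 *	}
 */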

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
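
/*
 * Example: with the default KMALLOC_MIN_SIZE of 8, KMALLOC_SHIFT_LOW is
 * ilog2(8) = 3, matching the "if (size <= 8) return 3;" case in
 * kmalloc_index() below.
 */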

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so this value must be high enough that performance
 * critical objects are still allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
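
/*
 * Example: with 4KB pages, SLUB_MAX_SIZE is 8192 and SLUB_PAGE_SHIFT is 14,
 * so kmalloc_caches[] below has room for caches up to kmalloc_index(8192)
 * == 13; anything larger goes straight to the page allocator.
 */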

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	/*
	 * The following is only needed to support architectures with a larger
	 * page size than 4k.
	 */
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

	/*
	 * What we really wanted to do and cannot do because of compiler issues is:
	 *	int i;
	 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
	 *		if (size <= (1 << i))
	 *			return i;
	 */
}
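
/*
 * Example: kmalloc_index(100) falls through to the "size <= 128" case and
 * returns 7, so a 100-byte request is served from the 128-byte general
 * cache. Indices 1 and 2 are the odd-sized 96- and 192-byte caches, which
 * cut the waste for sizes just above 64 and 128 bytes.
 */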

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}
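
/*
 * Example: for a compile-time constant size the lookup folds away
 * completely; kmalloc_slab(64) reduces to &kmalloc_caches[6]. A zero size
 * yields index 0 and thus NULL, which callers turn into ZERO_SIZE_PTR.
 */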

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}
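
/*
 * Example: on a system with 4KB pages, kmalloc_large(10000, flags)
 * computes get_order(10000) = 2 and returns a 16KB compound allocation;
 * the tracepoint records both the requested (10000) and the actually
 * allocated (16384) size.
 */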

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}
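
/*
 * Example of how the inlined fast path resolves at compile time:
 *
 *	kmalloc(64, GFP_KERNEL)
 *		-> kmem_cache_alloc_notrace(&kmalloc_caches[6], GFP_KERNEL)
 *
 *	kmalloc(len, GFP_KERNEL)	(len not a compile-time constant)
 *		-> __kmalloc(len, GFP_KERNEL)
 *
 * Constant sizes above SLUB_MAX_SIZE go to kmalloc_large(); DMA requests
 * always fall back to __kmalloc().
 */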

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
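
/*
 * Note that unlike kmalloc() above, this inlined node-aware path has no
 * kmalloc_large() shortcut: constant sizes above SLUB_MAX_SIZE, and all
 * DMA requests, fall through to __kmalloc_node().
 */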
#endif

void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLUB_DEF_H */