#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Used cpu partial on free */
	NR_SLUB_STAT_ITEMS };
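
/*
 * Illustrative sketch, not part of this header: mm/slub.c bumps these
 * counters with a cheap per-cpu increment along the lines of the helper
 * below. Shown under #if 0 because struct kmem_cache is only defined
 * further down in this file.
 */
#if 0
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
#endif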

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
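
/*
 * Illustrative sketch, not part of this header: the tid above is what lets
 * the allocation fastpath in mm/slub.c run without disabling interrupts. A
 * simplified version of that loop is shown under #if 0; the helpers such as
 * next_tid() and get_freepointer_safe() live in mm/slub.c.
 */
#if 0
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		object = this_cpu_read(s->cpu_slab->freelist);
	} while (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
					  s->cpu_slab->tid,
					  object, tid,
					  get_freepointer_safe(s, object),
					  next_tid(tid)));
#endif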

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
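
/*
 * Illustrative sketch, not part of this header: mm/slub.c packs both values
 * into the single word roughly as below, so 'oo' can be read and updated
 * atomically. OO_SHIFT/OO_MASK and these helpers are simplified from the
 * ones in mm/slub.c.
 */
#if 0
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline struct kmem_cache_order_objects oo_make(int order,
						      unsigned long objects)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + objects
	};
	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
#endif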

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so this value must be high enough that performance
 * critical objects are still allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
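
/*
 * Worked example (assuming 4k pages): PAGE_SIZE = 4096, so
 * SLUB_MAX_SIZE = 8192 and SLUB_PAGE_SHIFT = 14. kmalloc_index() below
 * returns at most 13 for slab-backed sizes (8k), which fits in the
 * kmalloc_caches[SLUB_PAGE_SHIFT] array declared further down.
 */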

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
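
/*
 * Illustrative sketch, not part of this header: for a constant size the
 * cache lookup resolves to a single array access. For example, a 100-byte
 * request maps to kmalloc_index(100) == 7 and is served from the
 * 128-byte cache:
 *
 *	struct kmem_cache *s = kmalloc_caches[kmalloc_index(100)];
 */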

/*
 * Sorry that the following has to be that ugly, but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	/*
	 * The following is only needed to support architectures with a page
	 * size larger than 4k. We need to support up to 2 * PAGE_SIZE here,
	 * so for a 64k page size we have to go up to 128k.
	 */
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	BUG();
	return -1; /* Will never be reached */

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
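
/*
 * Worked examples (assuming KMALLOC_MIN_SIZE == 8): kmalloc_index(8)
 * returns 3, kmalloc_index(100) returns 7 (the 128-byte cache), and
 * kmalloc_index(96) returns 1, the odd-sized 96-byte cache that reduces
 * wasted space between the 64 and 128 byte power-of-two caches.
 */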

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

/**
 * verify_mem_not_deleted - check that allocated memory is still in use
 * @x: pointer to the memory to check
 *
 * Calling this on allocated memory checks that the memory is expected
 * to be in use, and prints warnings if not.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
	return true;
}
#endif
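
/*
 * Illustrative use (hypothetical caller): a debugging path can assert
 * that a buffer has not been freed out from under it:
 *
 *	if (!verify_mem_not_deleted(buf))
 *		pr_warn("buffer was freed prematurely\n");
 */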

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(s, flags, size);
		}
	}
	return __kmalloc(size, flags);
}
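
/*
 * Illustrative sketch, not part of this header: with a compile-time
 * constant size the branches above fold away, so the hypothetical helper
 * below compiles down to a direct kmem_cache_alloc_trace() call on the
 * 128-byte cache, while the variable-size case goes through __kmalloc().
 */
#if 0
static inline void *example_fixed_alloc(gfp_t flags)
{
	return kmalloc(100, flags);	/* constant: resolved at compile time */
}

static inline void *example_variable_alloc(size_t n, gfp_t flags)
{
	return kmalloc(n, flags);	/* not constant: calls __kmalloc() */
}
#endif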

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(s, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */