#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc is called with a size that can be determined at
 * compile time.
 */
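
/*
 * Illustrative sketch (not part of the original header): with the
 * compile-time selection below, a call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * with a constant size reduces, roughly, to
 *
 *	buf = kmem_cache_alloc(malloc_sizes[i].cs_cachep, GFP_KERNEL);
 *
 * where i indexes the first general cache whose cs_size is >= 64.
 * The available sizes are generated from kmalloc_sizes.h and depend on
 * PAGE_SIZE and L1_CACHE_BYTES, so the exact cache chosen is
 * configuration dependent. Non-constant sizes fall back to __kmalloc().
 */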

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
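
/*
 * Illustrative note (the sizes shown are an assumption; the real list
 * is generated by kmalloc_sizes.h): that header expands a caller-supplied
 * CACHE(x) macro once per general cache size, e.g.
 *
 *	CACHE(32) CACHE(64) CACHE(96) CACHE(128) ...
 *
 * The inline helpers below define CACHE(x) as an if/else chain that
 * increments i until size <= x, so the resulting index matches the
 * corresponding slot in malloc_sizes[].
 */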

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}

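/*
 * kzalloc() below follows the same compile-time size selection as
 * kmalloc(), but allocates through kmem_cache_zalloc()/__kzalloc()
 * so the returned memory is zero-filled.
 */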
static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
						flags, node);
#endif
		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
						flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
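
/*
 * Illustrative usage (hypothetical caller, not from this header):
 *
 *	p = kmalloc_node(sizeof(*p), GFP_KERNEL, numa_node_id());
 *
 * Because sizeof() is a compile-time constant, this takes the fast
 * path above; a non-constant size falls back to __kmalloc_node().
 */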

#endif	/* CONFIG_NUMA */

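/*
 * /proc/slabinfo support: slabinfo_op provides the seq_file iteration
 * and slabinfo_write handles writes (cache tuning).
 */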
extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);

#endif	/* _LINUX_SLAB_DEF_H */