blob: 365d036c454ad11b051f50e8a236fc5de8d9f734 [file] [log] [blame]
Christoph Lameter2e892f42006-12-13 00:34:23 -08001#ifndef _LINUX_SLAB_DEF_H
2#define _LINUX_SLAB_DEF_H
3
4/*
5 * Definitions unique to the original Linux SLAB allocator.
6 *
7 * What we provide here is a way to optimize the frequent kmalloc
8 * calls in the kernel by selecting the appropriate general cache
9 * if kmalloc was called with a size that can be established at
10 * compile time.
11 */
12
13#include <linux/init.h>
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h>
17
/*
 * Size description struct for the general (kmalloc) caches.
 *
 * malloc_sizes[] holds one entry per size class generated from
 * kmalloc_sizes.h; the inline kmalloc()/kzalloc() helpers below index
 * into it when the allocation size is a compile-time constant.
 */
struct cache_sizes {
	size_t cs_size;				/* object size served by this cache */
	struct kmem_cache *cs_cachep;		/* cache for normal allocations */
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;	/* cache backed by ZONE_DMA memory */
#endif
};
extern struct cache_sizes malloc_sizes[];

/* Out-of-line allocators implemented by the SLAB allocator proper. */
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
30
/*
 * kmalloc - allocate memory
 * @size: number of bytes required
 * @flags: the gfp allocation flags
 *
 * If @size is a compile-time constant, pick the smallest general cache
 * that fits at compile time and allocate from it directly; otherwise
 * fall back to the out-of-line __kmalloc().
 */
static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
		/*
		 * kmalloc_sizes.h expands CACHE(x) once per ascending cache
		 * size, so i ends up as the index of the first cache large
		 * enough for @size.
		 */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/*
			 * No general cache is big enough: reference an
			 * undefined symbol so the build fails at link time
			 * rather than misbehaving at run time.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* DMA-constrained requests come from the parallel DMA caches. */
		if (flags & GFP_DMA)
			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}
56
/*
 * kzalloc - allocate zeroed memory
 * @size: number of bytes required
 * @flags: the gfp allocation flags
 *
 * Same compile-time cache selection as kmalloc(), but uses the
 * zeroing allocators (kmem_cache_zalloc/__kzalloc) so the returned
 * memory is cleared.
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
		/*
		 * kmalloc_sizes.h expands CACHE(x) once per ascending cache
		 * size; i becomes the index of the first cache that fits.
		 */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/* Oversized constant request: fail at link time. */
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* DMA-constrained requests come from the parallel DMA caches. */
		if (flags & GFP_DMA)
			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
						 flags);
#endif
		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
	}
	return __kzalloc(size, flags);
}
82
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

/*
 * kmalloc_node - allocate memory from a specific NUMA node
 * @size: number of bytes required
 * @flags: the gfp allocation flags
 * @node: NUMA node to allocate from
 *
 * NUMA-aware variant of kmalloc(): same compile-time cache selection,
 * but the allocation is directed at @node via the *_node allocators.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
		/*
		 * kmalloc_sizes.h expands CACHE(x) once per ascending cache
		 * size; i becomes the index of the first cache that fits.
		 */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/* Oversized constant request: fail at link time. */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* DMA-constrained requests come from the parallel DMA caches. */
		if (flags & GFP_DMA)
			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
						flags, node);
#endif
		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
						flags, node);
	}
	return __kmalloc_node(size, flags, node);
}

#endif /* CONFIG_NUMA */
115
/*
 * seq_file operations and write handler for the slabinfo interface
 * (presumably backing /proc/slabinfo -- implemented elsewhere).
 */
extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
118
Christoph Lameter2e892f42006-12-13 00:34:23 -0800119#endif /* _LINUX_SLAB_DEF_H */