#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
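
/*
 * Illustrative sketch (not declared in this header): common code can
 * build a boot-readiness predicate on top of slab_state, along the
 * lines of
 *
 *	int slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 *
 * Each allocator advances slab_state step by step as its bootstrap
 * structures come online, e.g. SLUB moves to PARTIAL once the
 * kmem_cache_node cache itself is usable.
 */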

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

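/*
 * Usage sketch (illustrative; do_something() is a placeholder, not a
 * real helper): any walk of slab_caches must hold slab_mutex so the
 * list cannot change underneath the iteration:
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		do_something(s);
 *	mutex_unlock(&slab_mutex);
 */
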
#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

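/*
 * Expected caller pattern (sketch of the common cache-creation path,
 * not mandated by this header): kmem_cache_create() first offers the
 * allocator a chance to reuse ("alias") an existing compatible cache
 * and only then builds a new one:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	/* merged with an existing cache */
 *	/* otherwise allocate a kmem_cache and call __kmem_cache_create() */
 */
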
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

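/*
 * Illustrative use (assumption, mirroring the common creation path):
 * caller-supplied flags are filtered against the legal mask before a
 * cache is actually set up, so unsupported bits are silently dropped:
 *
 *	flags &= CACHE_CREATE_MASK;
 */
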
int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
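
/*
 * Sketch (assumption, modelled on the seq_file code behind
 * /proc/slabinfo that consumes these hooks): a show routine fills a
 * struct slabinfo via get_slabinfo() and emits one line per cache,
 * letting the allocator append its own statistics:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu %4u",
 *		   s->name, sinfo.active_objs, sinfo.num_objs,
 *		   sinfo.objects_per_slab);
 *	slabinfo_show_stats(m, s);
 *	seq_putc(m, '\n');
 */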
#endif /* MM_SLAB_H */