#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;
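	/*
	 * Sketch of how these tunables interact (the behaviour lives in
	 * mm/slab.c, not in this header): each CPU caches up to limit
	 * free objects and refills or drains batchcount objects at a
	 * time; shared scales the per-node array through which CPUs on
	 * the same node exchange batches.
	 */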

	unsigned int size;
	u32 reciprocal_buffer_size;
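	/*
	 * Why reciprocal_buffer_size exists (used by obj_to_index() in
	 * mm/slab.c): it caches reciprocal_value(size) so the hot path
	 * can turn an object's byte offset into its index with a
	 * multiply and shift instead of a division:
	 *
	 *	idx = reciprocal_divide(offset, cachep->reciprocal_buffer_size);
	 */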
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;
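	/*
	 * Worked example (illustrative): with 4KB pages, gfporder == 2
	 * means 2^2 = 4 contiguous pages, i.e. a 16KB slab.
	 */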

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
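	/*
	 * Colouring sketch (the logic is in cache_grow() in mm/slab.c):
	 * successive slabs start their objects at offsets 0, colour_off,
	 * 2 * colour_off, ... cycling modulo colour, so objects with the
	 * same index in different slabs do not all land on the same CPU
	 * cache lines.
	 */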
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
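	/*
	 * slab_size is the size of the per-slab management structure
	 * (struct slab plus the bufctl array). When objects are large,
	 * the descriptor is kept off-slab in a separate cache pointed
	 * to by slabp_cache; otherwise slabp_cache is NULL and the
	 * descriptor sits at the start of the slab itself (the logic
	 * lives in mm/slab.c).
	 */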

	/* constructor func */
	void (*ctor)(void *obj);
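	/*
	 * The constructor runs once per object when a fresh slab is
	 * populated (see cache_init_objs() in mm/slab.c), not on every
	 * kmem_cache_alloc().
	 */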

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields; the following
	 * variable contains the offset to the user object (its size is
	 * in object_size above).
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node, since the "node" array uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
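
/*
 * Minimal usage sketch for a cache described by this structure. The
 * functions are declared in <linux/slab.h>, not here; struct my_obj is
 * an illustrative type, not something this file defines:
 *
 *	struct kmem_cache *cachep;
 *	struct my_obj *p;
 *
 *	cachep = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *				   SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(cachep, p);
 *	kmem_cache_destroy(cachep);
 */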

#endif	/* _LINUX_SLAB_DEF_H */