blob: 8235dfbb3b050e090868b35c2abc143ef5cf253b [file] [log] [blame]
Christoph Lameter2e892f42006-12-13 00:34:23 -08001#ifndef _LINUX_SLAB_DEF_H
2#define _LINUX_SLAB_DEF_H
3
Hannes Frederic Sowa809fa972014-01-22 02:29:41 +01004#include <linux/reciprocal_div.h>
5
/*
 * Definitions unique to the original Linux SLAB allocator.
 */
9
/*
 * Per-cache management structure for the original SLAB allocator.
 *
 * Layout is deliberate and ordered by access frequency (see the numbered
 * section comments).  array[] must remain the LAST member so the structure
 * can be sized to nr_cpu_ids slots at runtime — do not append fields.
 */
struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
	/* NOTE(review): presumably #objects moved per per-cpu refill/drain
	 * batch and the per-cpu array high-water mark — confirm in slab.c */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	/* total per-object footprint, including any debug fields/padding
	 * (see the CONFIG_DEBUG_SLAB note below) */
	unsigned int size;
	/* precomputed reciprocal of size for divide-free index math,
	 * cf. <linux/reciprocal_div.h> */
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	/* cache used to allocate off-slab freelists; NOTE(review): NULL
	 * presumably means the freelist is kept on-slab — confirm */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;		/* cache name */
	struct list_head list;		/* linkage among all kmem_caches */
	int refcount;
	int object_size;		/* caller-visible object size */
	int align;			/* object alignment */

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object.  size (above) contains the
	 * total object size including these internal fields; obj_offset is
	 * the offset of the user object within it, and object_size (above)
	 * is the user-visible size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	/* per-memcg accounting state */
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;	/* per-NUMA-node slab state */
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
92
Christoph Lameter2e892f42006-12-13 00:34:23 -080093#endif /* _LINUX_SLAB_DEF_H */