#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
        struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
        struct reciprocal_value reciprocal_buffer_size;
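        /*
         * Editor's note: reciprocal_buffer_size caches a precomputed
         * reciprocal of 'size' (see <linux/reciprocal_div.h>), so hot
         * paths can turn a "byte offset / object size" division into a
         * multiply and shift, e.g. when mapping an object pointer back
         * to its index within a slab.
         */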
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *freelist_cache;
        unsigned int freelist_size;

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;
#ifdef CONFIG_DEBUG_SLAB_LEAK
        atomic_t store_user_clean;
#endif

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object.  'size' contains the
         * total object size including these internal fields, while
         * obj_offset holds the offset from the start of that area to the
         * user-visible object (whose size is object_size above).
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
#endif
#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

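        /*
         * Editor's note: node[] is deliberately the last member. The
         * boot-time cache backing struct kmem_cache itself is created
         * with room for only nr_node_ids pointers rather than the full
         * MAX_NUMNODES (see create_boot_cache() in mm/slab.c), so no
         * field may be placed after this array.
         */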
        struct kmem_cache_node *node[MAX_NUMNODES];
};
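
/*
 * Usage sketch (illustration only, not part of this header): callers
 * never touch struct kmem_cache directly; they go through the slab API
 * declared in <linux/slab.h>.  "my_cache" and struct my_obj below are
 * hypothetical names:
 *
 *	struct kmem_cache *c = kmem_cache_create("my_cache",
 *				sizeof(struct my_obj), 0,
 *				SLAB_HWCACHE_ALIGN, NULL);
 *	struct my_obj *p = kmem_cache_alloc(c, GFP_KERNEL);
 *	...
 *	kmem_cache_free(c, p);
 *	kmem_cache_destroy(c);
 */
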
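/*
 * Editor's note on nearest_obj(): given a pointer x somewhere inside a
 * slab page, round it down to the start of the containing object.
 * page->s_mem is the address of the first object and objects are laid
 * out back to back every cache->size bytes, so the modulo strips the
 * offset into the object; pointers past the final object are clamped
 * to the last one.
 *
 * Worked example (hypothetical numbers): with s_mem == 0x1000,
 * size == 256 and num == 16, x == 0x1234 gives
 * object = 0x1234 - (0x234 % 0x100) = 0x1234 - 0x34 = 0x1200,
 * i.e. the start of object #2.
 */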
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x)
{
        void *object = x - (x - page->s_mem) % cache->size;
        void *last_object = page->s_mem + (cache->num - 1) * cache->size;

        if (unlikely(object > last_object))
                return last_object;
        else
                return object;
}

#endif /* _LINUX_SLAB_DEF_H */