/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

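/*
 * With CONFIG_SLUB_STATS enabled, one counter per item above is kept in each
 * cpu's struct kmem_cache_cpu (see below).  As an illustration of how the
 * counters are bumped, mm/slub.c does roughly the following (a sketch, not
 * part of this header):
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * e.g. stat(s, ALLOC_FASTPATH) after a successful lockless allocation.
 */
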
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

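/*
 * freelist and tid are updated together, which is what makes the lockless
 * allocation fast path possible and why the CMPXCHG_DOUBLE_* stat items
 * above exist.  A simplified sketch of that fast path, based on mm/slub.c
 * (preemption and node handling omitted, so treat it as an illustration
 * only):
 *
 *	tid = this_cpu_read(s->cpu_slab->tid);
 *	c = raw_cpu_ptr(s->cpu_slab);
 *	object = c->freelist;
 *	if (!object)
 *		return __slab_alloc(s, gfpflags, node, addr, c);
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;
 *	return object;
 *
 * A failed cmpxchg means another context changed the cpu slab underneath us;
 * the attempt is simply retried (counted as CMPXCHG_DOUBLE_CPU_FAIL).
 */
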
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

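/*
 * These helpers let the rest of SLUB use the per cpu partial list without
 * scattering #ifdefs: with CONFIG_SLUB_CPU_PARTIAL disabled they evaluate to
 * constant NULL and the code using them is compiled away.  A rough sketch of
 * how the allocation slow path in mm/slub.c refills the cpu slab from this
 * list (illustrative only, not part of this header):
 *
 *	if (slub_percpu_partial(c)) {
 *		page = c->page = slub_percpu_partial(c);
 *		slub_set_percpu_partial(c, page);
 *		stat(s, CPU_PARTIAL_ALLOC);
 *		goto redo;
 *	}
 *
 * The list is singly linked through page->next, which is what
 * slub_set_percpu_partial() advances.
 */
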
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};

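/*
 * Only mm/slub.c looks inside the word.  The order is kept in the upper bits
 * and the object count in the lower bits, roughly (OO_SHIFT/OO_MASK are
 * mm/slub.c internals, shown here purely as an illustration):
 *
 *	x       = (order << OO_SHIFT) + objects;
 *	order   = x >> OO_SHIFT;
 *	objects = x & OO_MASK;
 *
 * Keeping both in one word is what allows the pair to be read or updated
 * atomically, as the comment above requires.
 */
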
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

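/*
 * Callers do not touch these fields directly; they use the generic slab API
 * from <linux/slab.h>.  A typical sequence, with made-up names purely for
 * illustration:
 *
 *	struct kmem_cache *s;
 *	struct foo *obj;
 *
 *	s = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *			      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(s, GFP_KERNEL);
 *	...
 *	kmem_cache_free(s, obj);
 *	kmem_cache_destroy(s);
 */
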
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif // CONFIG_SLUB_CPU_PARTIAL

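/*
 * cpu_partial is sized when a cache is set up in mm/slub.c; the heuristic
 * there looks roughly like the following (thresholds shown only to
 * illustrate the idea that larger objects keep fewer per cpu partial
 * objects around):
 *
 *	if (!kmem_cache_has_cpu_partial(s))
 *		slub_set_cpu_partial(s, 0);
 *	else if (s->size >= PAGE_SIZE)
 *		slub_set_cpu_partial(s, 2);
 *	else if (s->size >= 1024)
 *		slub_set_cpu_partial(s, 6);
 *	else if (s->size >= 256)
 *		slub_set_cpu_partial(s, 13);
 *	else
 *		slub_set_cpu_partial(s, 30);
 */
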
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

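/*
 * nearest_obj() maps an arbitrary address inside a slab page back to the
 * start of the object containing it, clamping to the last object in the page
 * and then skipping the left red zone via fixup_red_left().  Debugging code
 * that only has a raw pointer into the page (KASAN reports, for example)
 * uses it to find the enclosing object.
 */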
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

#endif /* _LINUX_SLUB_DEF_H */