/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise they come from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache. This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

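/*
 * Pick the freelist index type: if a single page cannot hold more than 256
 * objects of SLAB_OBJ_MIN_SIZE, a one-byte index is sufficient, otherwise
 * use a two-byte index.
 */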
#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

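/* Highest object index representable by freelist_idx_t (255 or 65535). */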
#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)		((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *)(objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *)(objp + cachep->size -
				      sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

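/*
 * Poison value handed back by the !CONFIG_NUMA alloc_alien_cache() stub.
 * It is never dereferenced, so a bogus pointer makes accidental use obvious.
 */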
#define BAD_ALIEN_MAGIC 0x01020304ul

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all page aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using a fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

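/*
 * Free an object straight back to the slab lists of the node that owns its
 * page, bypassing the per-cpu array cache.
 */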
static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
		kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

/*
 * Allocates and initializes a kmem_cache_node for a node on each slab cache,
 * used for either memory or cpu hotplug. If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node. When hotplugging memory or a cpu, existing nodes are not
 * replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const size_t memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		n = get_node(cachep, node);
		if (!n) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go. slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&n->list_lock);
		n->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&n->list_lock);
	}
	return 0;
}

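/* Number of slabs that would need to be freed to release all free objects. */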
static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

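/*
 * Tear down the per-cpu state for a CPU that is going away (or failed to come
 * up): flush its array cache back to the node lists and, if this was the
 * node's last online CPU, release the shared and alien caches as well.
 */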
Paul Gortmaker0db06282013-06-19 14:53:51 -0400906static void cpuup_canceled(long cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907{
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700908 struct kmem_cache *cachep;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000909 struct kmem_cache_node *n = NULL;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -0700910 int node = cpu_to_mem(cpu);
Rusty Russella70f7302009-03-13 14:49:46 +1030911 const struct cpumask *mask = cpumask_of_node(node);
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700912
Christoph Lameter18004c52012-07-06 15:25:12 -0500913 list_for_each_entry(cachep, &slab_caches, list) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700914 struct array_cache *nc;
915 struct array_cache *shared;
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700916 struct alien_cache **alien;
Joonsoo Kim97654df2014-08-06 16:04:25 -0700917 LIST_HEAD(list);
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700918
Christoph Lameter18bf8542014-08-06 16:04:11 -0700919 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000920 if (!n)
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700921 continue;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700922
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000923 spin_lock_irq(&n->list_lock);
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700924
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000925 /* Free limit for this kmem_cache_node */
926 n->free_limit -= cachep->batchcount;
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700927
928 /* cpu is dead; no one can alloc from it. */
929 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
930 if (nc) {
Joonsoo Kim97654df2014-08-06 16:04:25 -0700931 free_block(cachep, nc->entry, nc->avail, node, &list);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700932 nc->avail = 0;
933 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700934
Rusty Russell58463c12009-12-17 11:43:12 -0600935 if (!cpumask_empty(mask)) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000936 spin_unlock_irq(&n->list_lock);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700937 goto free_slab;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700938 }
939
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000940 shared = n->shared;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700941 if (shared) {
942 free_block(cachep, shared->entry,
Joonsoo Kim97654df2014-08-06 16:04:25 -0700943 shared->avail, node, &list);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000944 n->shared = NULL;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700945 }
946
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000947 alien = n->alien;
948 n->alien = NULL;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700949
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000950 spin_unlock_irq(&n->list_lock);
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700951
952 kfree(shared);
953 if (alien) {
954 drain_alien_cache(cachep, alien);
955 free_alien_cache(alien);
956 }
Joonsoo Kimbf0dea22014-10-09 15:26:27 -0700957
958free_slab:
Joonsoo Kim97654df2014-08-06 16:04:25 -0700959 slabs_destroy(cachep, &list);
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700960 }
961 /*
962 * In the previous loop, all the objects were freed to
963 * the respective cache's slabs, now we can go ahead and
964 * shrink each nodelist to its limit.
965 */
Christoph Lameter18004c52012-07-06 15:25:12 -0500966 list_for_each_entry(cachep, &slab_caches, list) {
Christoph Lameter18bf8542014-08-06 16:04:11 -0700967 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000968 if (!n)
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700969 continue;
Wanpeng Li0fa81032013-07-04 08:33:22 +0800970 drain_freelist(cachep, n, slabs_tofree(cachep, n));
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700971 }
972}
973
Paul Gortmaker0db06282013-06-19 14:53:51 -0400974static int cpuup_prepare(long cpu)
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700975{
Pekka Enberg343e0d72006-02-01 03:05:50 -0800976 struct kmem_cache *cachep;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000977 struct kmem_cache_node *n = NULL;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -0700978 int node = cpu_to_mem(cpu);
David Rientjes8f9f8d92010-03-27 19:40:47 -0700979 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700981 /*
982 * We need to do this right in the beginning since
983 * alloc_arraycache's are going to use this list.
984 * kmalloc_node allows us to add the slab to the right
Christoph Lameterce8eb6c2013-01-10 19:14:19 +0000985 * kmem_cache_node and not this cpu's kmem_cache_node
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700986 */
Christoph Lameter6a673682013-01-10 19:14:19 +0000987 err = init_cache_node_node(node);
David Rientjes8f9f8d92010-03-27 19:40:47 -0700988 if (err < 0)
989 goto bad;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700990
991 /*
992 * Now we can go ahead with allocating the shared arrays and
993 * array caches
994 */
Christoph Lameter18004c52012-07-06 15:25:12 -0500995 list_for_each_entry(cachep, &slab_caches, list) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700996 struct array_cache *shared = NULL;
Joonsoo Kimc8522a32014-08-06 16:04:29 -0700997 struct alien_cache **alien = NULL;
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700998
Akinobu Mitafbf1e472007-10-18 03:05:09 -0700999 if (cachep->shared) {
1000 shared = alloc_arraycache(node,
1001 cachep->shared * cachep->batchcount,
Pekka Enberg83b519e2009-06-10 19:40:04 +03001002 0xbaadf00d, GFP_KERNEL);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001003 if (!shared)
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001004 goto bad;
1005 }
1006 if (use_alien_caches) {
Pekka Enberg83b519e2009-06-10 19:40:04 +03001007 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
Akinobu Mita12d00f62007-10-18 03:05:11 -07001008 if (!alien) {
1009 kfree(shared);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001010 goto bad;
Akinobu Mita12d00f62007-10-18 03:05:11 -07001011 }
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001012 }
Christoph Lameter18bf8542014-08-06 16:04:11 -07001013 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001014 BUG_ON(!n);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001015
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001016 spin_lock_irq(&n->list_lock);
1017 if (!n->shared) {
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001018 /*
1019 * We are serialised from CPU_DEAD or
1020 * CPU_UP_CANCELLED by the cpucontrol lock
1021 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001022 n->shared = shared;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001023 shared = NULL;
1024 }
1025#ifdef CONFIG_NUMA
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001026 if (!n->alien) {
1027 n->alien = alien;
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001028 alien = NULL;
1029 }
1030#endif
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001031 spin_unlock_irq(&n->list_lock);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001032 kfree(shared);
1033 free_alien_cache(alien);
1034 }
Pekka Enbergce79ddc2009-11-23 22:01:15 +02001035
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001036 return 0;
1037bad:
Akinobu Mita12d00f62007-10-18 03:05:11 -07001038 cpuup_canceled(cpu);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001039 return -ENOMEM;
1040}
1041
Paul Gortmaker0db06282013-06-19 14:53:51 -04001042static int cpuup_callback(struct notifier_block *nfb,
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001043 unsigned long action, void *hcpu)
1044{
1045 long cpu = (long)hcpu;
1046 int err = 0;
1047
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 switch (action) {
Heiko Carstens38c3bd92007-05-09 02:34:05 -07001049 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001050 case CPU_UP_PREPARE_FROZEN:
Christoph Lameter18004c52012-07-06 15:25:12 -05001051 mutex_lock(&slab_mutex);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001052 err = cpuup_prepare(cpu);
Christoph Lameter18004c52012-07-06 15:25:12 -05001053 mutex_unlock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 break;
1055 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001056 case CPU_ONLINE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 start_cpu_timer(cpu);
1058 break;
1059#ifdef CONFIG_HOTPLUG_CPU
Christoph Lameter5830c592007-05-09 02:34:22 -07001060 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001061 case CPU_DOWN_PREPARE_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001062 /*
Christoph Lameter18004c52012-07-06 15:25:12 -05001063 * Shutdown cache reaper. Note that the slab_mutex is
Christoph Lameter5830c592007-05-09 02:34:22 -07001064 * held so that if cache_reap() is invoked it cannot do
1065 * anything expensive but will only modify reap_work
1066 * and reschedule the timer.
1067 */
Tejun Heoafe2c512010-12-14 16:21:17 +01001068 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
Christoph Lameter5830c592007-05-09 02:34:22 -07001069 /* Now the cache_reaper is guaranteed to be not running. */
Tejun Heo1871e522009-10-29 22:34:13 +09001070 per_cpu(slab_reap_work, cpu).work.func = NULL;
Christoph Lameter5830c592007-05-09 02:34:22 -07001071 break;
1072 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001073 case CPU_DOWN_FAILED_FROZEN:
Christoph Lameter5830c592007-05-09 02:34:22 -07001074 start_cpu_timer(cpu);
1075 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001077 case CPU_DEAD_FROZEN:
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001078 /*
1079 * Even if all the cpus of a node are down, we don't free the
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001080 * kmem_cache_node of any cache. This to avoid a race between
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001081 * cpu_down, and a kmalloc allocation from another cpu for
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001082 * memory from the node of the cpu going down. The node
Ravikiran G Thirumalai4484ebf2006-02-04 23:27:59 -08001083 * structure is usually allocated from kmem_cache_create() and
1084 * gets destroyed at kmem_cache_destroy().
1085 */
Simon Arlott183ff222007-10-20 01:27:18 +02001086 /* fall through */
Ravikiran G Thirumalai8f5be202006-12-06 20:32:14 -08001087#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001089 case CPU_UP_CANCELED_FROZEN:
Christoph Lameter18004c52012-07-06 15:25:12 -05001090 mutex_lock(&slab_mutex);
Akinobu Mitafbf1e472007-10-18 03:05:09 -07001091 cpuup_canceled(cpu);
Christoph Lameter18004c52012-07-06 15:25:12 -05001092 mutex_unlock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 }
Akinobu Mitaeac40682010-05-26 14:43:32 -07001095 return notifier_from_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096}
1097
Paul Gortmaker0db06282013-06-19 14:53:51 -04001098static struct notifier_block cpucache_notifier = {
Chandra Seetharaman74b85f32006-06-27 02:54:09 -07001099 &cpuup_callback, NULL, 0
1100};
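/*
 * This notifier block is wired up from kmem_cache_init_late() via
 * register_cpu_notifier() (see below).  cpuup_callback() encodes its
 * return value with notifier_from_errno(), so e.g. an -ENOMEM from
 * cpuup_prepare() is reported back to the cpu hotplug core as a failure
 * and the cpu bring-up is cancelled.
 */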
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
David Rientjes8f9f8d92010-03-27 19:40:47 -07001102#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1103/*
1104 * Drains the freelist for a node on each slab cache; used for memory hot-remove.
1105 * Returns -EBUSY if not all objects can be drained, so that the node is not
1106 * removed.
1107 *
Christoph Lameter18004c52012-07-06 15:25:12 -05001108 * Must hold slab_mutex.
David Rientjes8f9f8d92010-03-27 19:40:47 -07001109 */
Christoph Lameter6a673682013-01-10 19:14:19 +00001110static int __meminit drain_cache_node_node(int node)
David Rientjes8f9f8d92010-03-27 19:40:47 -07001111{
1112 struct kmem_cache *cachep;
1113 int ret = 0;
1114
Christoph Lameter18004c52012-07-06 15:25:12 -05001115 list_for_each_entry(cachep, &slab_caches, list) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001116 struct kmem_cache_node *n;
David Rientjes8f9f8d92010-03-27 19:40:47 -07001117
Christoph Lameter18bf8542014-08-06 16:04:11 -07001118 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001119 if (!n)
David Rientjes8f9f8d92010-03-27 19:40:47 -07001120 continue;
1121
Wanpeng Li0fa81032013-07-04 08:33:22 +08001122 drain_freelist(cachep, n, slabs_tofree(cachep, n));
David Rientjes8f9f8d92010-03-27 19:40:47 -07001123
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001124 if (!list_empty(&n->slabs_full) ||
1125 !list_empty(&n->slabs_partial)) {
David Rientjes8f9f8d92010-03-27 19:40:47 -07001126 ret = -EBUSY;
1127 break;
1128 }
1129 }
1130 return ret;
1131}
1132
1133static int __meminit slab_memory_callback(struct notifier_block *self,
1134 unsigned long action, void *arg)
1135{
1136 struct memory_notify *mnb = arg;
1137 int ret = 0;
1138 int nid;
1139
1140 nid = mnb->status_change_nid;
1141 if (nid < 0)
1142 goto out;
1143
1144 switch (action) {
1145 case MEM_GOING_ONLINE:
Christoph Lameter18004c52012-07-06 15:25:12 -05001146 mutex_lock(&slab_mutex);
Christoph Lameter6a673682013-01-10 19:14:19 +00001147 ret = init_cache_node_node(nid);
Christoph Lameter18004c52012-07-06 15:25:12 -05001148 mutex_unlock(&slab_mutex);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001149 break;
1150 case MEM_GOING_OFFLINE:
Christoph Lameter18004c52012-07-06 15:25:12 -05001151 mutex_lock(&slab_mutex);
Christoph Lameter6a673682013-01-10 19:14:19 +00001152 ret = drain_cache_node_node(nid);
Christoph Lameter18004c52012-07-06 15:25:12 -05001153 mutex_unlock(&slab_mutex);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001154 break;
1155 case MEM_ONLINE:
1156 case MEM_OFFLINE:
1157 case MEM_CANCEL_ONLINE:
1158 case MEM_CANCEL_OFFLINE:
1159 break;
1160 }
1161out:
Prarit Bhargava5fda1bd2011-03-22 16:30:49 -07001162 return notifier_from_errno(ret);
David Rientjes8f9f8d92010-03-27 19:40:47 -07001163}
1164#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
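/*
 * slab_memory_callback() is hooked up from kmem_cache_init_late() with
 * hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI), so the
 * per-node kmem_cache_node structures are set up while memory is going
 * online and drained again while it is going offline.
 */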
1165
Christoph Lametere498be72005-09-09 13:03:32 -07001166/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001167 * Swap the static kmem_cache_node with kmalloc-allocated memory.
Christoph Lametere498be72005-09-09 13:03:32 -07001168 */
Christoph Lameter6744f082013-01-10 19:12:17 +00001169static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
David Rientjes8f9f8d92010-03-27 19:40:47 -07001170 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07001171{
Christoph Lameter6744f082013-01-10 19:12:17 +00001172 struct kmem_cache_node *ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001173
Christoph Lameter6744f082013-01-10 19:12:17 +00001174 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07001175 BUG_ON(!ptr);
1176
Christoph Lameter6744f082013-01-10 19:12:17 +00001177 memcpy(ptr, list, sizeof(struct kmem_cache_node));
Ingo Molnar2b2d5492006-07-03 00:25:28 -07001178 /*
1179 * Do not assume that spinlocks can be initialized via memcpy:
1180 */
1181 spin_lock_init(&ptr->list_lock);
1182
Christoph Lametere498be72005-09-09 13:03:32 -07001183 MAKE_ALL_LISTS(cachep, ptr, nodeid);
Christoph Lameter6a673682013-01-10 19:14:19 +00001184 cachep->node[nodeid] = ptr;
Christoph Lametere498be72005-09-09 13:03:32 -07001185}
1186
Andrew Mortona737b3e2006-03-22 00:08:11 -08001187/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001188 * For setting up all the kmem_cache_node structures for caches whose
1189 * buffer_size is the same as the size of kmem_cache_node.
Pekka Enberg556a1692008-01-25 08:20:51 +02001190 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001191static void __init set_up_node(struct kmem_cache *cachep, int index)
Pekka Enberg556a1692008-01-25 08:20:51 +02001192{
1193 int node;
1194
1195 for_each_online_node(node) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001196 cachep->node[node] = &init_kmem_cache_node[index + node];
Christoph Lameter6a673682013-01-10 19:14:19 +00001197 cachep->node[node]->next_reap = jiffies +
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08001198 REAPTIMEOUT_NODE +
1199 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
Pekka Enberg556a1692008-01-25 08:20:51 +02001200 }
1201}
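/*
 * The next_reap value set above staggers periodic reaping: every cache
 * gets the same REAPTIMEOUT_NODE interval plus a per-cache offset derived
 * from its pointer value modulo REAPTIMEOUT_NODE, so the reap timers of
 * different caches do not all expire on the same jiffy.
 */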
1202
1203/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08001204 * Initialisation. Called after the page allocator has been initialised and
1205 * before smp_init().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 */
1207void __init kmem_cache_init(void)
1208{
Christoph Lametere498be72005-09-09 13:03:32 -07001209 int i;
1210
Joonsoo Kim68126702013-10-24 10:07:42 +09001211 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1212 sizeof(struct rcu_head));
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001213 kmem_cache = &kmem_cache_boot;
1214
Mel Gormanb6e68bc2009-06-16 15:32:16 -07001215 if (num_possible_nodes() == 1)
Siddha, Suresh B62918a02007-05-02 19:27:18 +02001216 use_alien_caches = 0;
1217
Christoph Lameter3c583462012-11-28 16:23:01 +00001218 for (i = 0; i < NUM_INIT_LISTS; i++)
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001219 kmem_cache_node_init(&init_kmem_cache_node[i]);
Christoph Lameter3c583462012-11-28 16:23:01 +00001220
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 /*
1222 * Fragmentation resistance on low memory - only use bigger
David Rientjes3df1ccc2011-10-18 22:09:28 -07001223 * page orders on machines with more than 32MB of memory if
1224 * not overridden on the command line.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 */
David Rientjes3df1ccc2011-10-18 22:09:28 -07001226 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
David Rientjes543585c2011-10-18 22:09:24 -07001227 slab_max_order = SLAB_MAX_ORDER_HI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 /* Bootstrap is tricky, because several objects are allocated
1230 * from caches that do not exist yet:
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001231 * 1) initialize the kmem_cache cache: it contains the struct
1232 * kmem_cache structures of all caches, except kmem_cache itself:
1233 * kmem_cache is statically allocated.
Christoph Lametere498be72005-09-09 13:03:32 -07001234 * Initially an __init data area is used for the head array and the
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001235 * kmem_cache_node structures, it's replaced with a kmalloc allocated
Christoph Lametere498be72005-09-09 13:03:32 -07001236 * array at the end of the bootstrap.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 * 2) Create the first kmalloc cache.
Pekka Enberg343e0d72006-02-01 03:05:50 -08001238 * The struct kmem_cache for the new cache is allocated normally.
Christoph Lametere498be72005-09-09 13:03:32 -07001239 * An __init data area is used for the head array.
1240 * 3) Create the remaining kmalloc caches, with minimally sized
1241 * head arrays.
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001242 * 4) Replace the __init data head arrays for kmem_cache and the first
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 * kmalloc cache with kmalloc allocated arrays.
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001244 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
Christoph Lametere498be72005-09-09 13:03:32 -07001245 * the other caches with kmalloc-allocated memory.
1246 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 */
1248
Christoph Lameter9b030cb2012-09-05 00:20:33 +00001249 /* 1) create the kmem_cache */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250
Eric Dumazet8da34302007-05-06 14:49:29 -07001251 /*
Eric Dumazetb56efcf2011-07-20 19:04:23 +02001252 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
Eric Dumazet8da34302007-05-06 14:49:29 -07001253 */
Christoph Lameter2f9baa92012-11-28 16:23:09 +00001254 create_boot_cache(kmem_cache, "kmem_cache",
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001255 offsetof(struct kmem_cache, node) +
Christoph Lameter6744f082013-01-10 19:12:17 +00001256 nr_node_ids * sizeof(struct kmem_cache_node *),
Christoph Lameter2f9baa92012-11-28 16:23:09 +00001257 SLAB_HWCACHE_ALIGN);
1258 list_add(&kmem_cache->list, &slab_caches);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001259 slab_state = PARTIAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Andrew Mortona737b3e2006-03-22 00:08:11 -08001261 /*
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001262 * Initialize the caches that provide memory for the kmem_cache_node
1263 * structures first. Without this, further allocations will BUG().
Christoph Lametere498be72005-09-09 13:03:32 -07001264 */
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001265 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001266 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001267 slab_state = PARTIAL_NODE;
Daniel Sanders34cc6992015-06-24 16:55:57 -07001268 setup_kmalloc_cache_index_table();
Christoph Lametere498be72005-09-09 13:03:32 -07001269
Ingo Molnare0a42722006-06-23 02:03:46 -07001270 slab_early_init = 0;
1271
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001272 /* 5) Replace the bootstrap kmem_cache_node */
Christoph Lametere498be72005-09-09 13:03:32 -07001273 {
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07001274 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
Mel Gorman9c09a952008-01-24 05:49:54 -08001276 for_each_online_node(nid) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001277 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
Pekka Enberg556a1692008-01-25 08:20:51 +02001278
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001279 init_list(kmalloc_caches[INDEX_NODE],
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001280 &init_kmem_cache_node[SIZE_NODE + nid], nid);
Christoph Lametere498be72005-09-09 13:03:32 -07001281 }
1282 }
1283
Christoph Lameterf97d5f62013-01-10 19:12:17 +00001284 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
Pekka Enberg8429db52009-06-12 15:58:59 +03001285}
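/*
 * Rough sketch of how slab_state advances during the bootstrap above
 * (names as used elsewhere in this file):
 *
 *	DOWN         - nothing usable yet
 *	PARTIAL      - kmem_cache itself works (after create_boot_cache())
 *	PARTIAL_NODE - the kmalloc-node cache exists, so kmem_cache_node
 *	               structures can now be kmalloc'ed
 *	UP / FULL    - set later, in kmem_cache_init_late(), once the
 *	               remaining kmalloc caches and cpu arrays are final
 */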
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001286
Pekka Enberg8429db52009-06-12 15:58:59 +03001287void __init kmem_cache_init_late(void)
1288{
1289 struct kmem_cache *cachep;
1290
Christoph Lameter97d06602012-07-06 15:25:11 -05001291 slab_state = UP;
Peter Zijlstra52cef182011-11-28 21:12:40 +01001292
Pekka Enberg8429db52009-06-12 15:58:59 +03001293 /* 6) resize the head arrays to their final sizes */
Christoph Lameter18004c52012-07-06 15:25:12 -05001294 mutex_lock(&slab_mutex);
1295 list_for_each_entry(cachep, &slab_caches, list)
Pekka Enberg8429db52009-06-12 15:58:59 +03001296 if (enable_cpucache(cachep, GFP_NOWAIT))
1297 BUG();
Christoph Lameter18004c52012-07-06 15:25:12 -05001298 mutex_unlock(&slab_mutex);
Ravikiran G Thirumalai056c6242006-09-25 23:31:38 -07001299
Christoph Lameter97d06602012-07-06 15:25:11 -05001300 /* Done! */
1301 slab_state = FULL;
1302
Andrew Mortona737b3e2006-03-22 00:08:11 -08001303 /*
1304 * Register a cpu startup notifier callback that initializes
1305 * cpu_cache_get for all new cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 */
1307 register_cpu_notifier(&cpucache_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
David Rientjes8f9f8d92010-03-27 19:40:47 -07001309#ifdef CONFIG_NUMA
1310 /*
1311 * Register a memory hotplug callback that initializes and frees
Christoph Lameter6a673682013-01-10 19:14:19 +00001312 * the per-node kmem_cache_node structures.
David Rientjes8f9f8d92010-03-27 19:40:47 -07001313 */
1314 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1315#endif
1316
Andrew Mortona737b3e2006-03-22 00:08:11 -08001317 /*
1318 * The reap timers are started later, with a module init call: that part
1319 * of the kernel is not yet operational.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 */
1321}
1322
1323static int __init cpucache_init(void)
1324{
1325 int cpu;
1326
Andrew Mortona737b3e2006-03-22 00:08:11 -08001327 /*
1328 * Register the timers that return unneeded pages to the page allocator
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 */
Christoph Lametere498be72005-09-09 13:03:32 -07001330 for_each_online_cpu(cpu)
Andrew Mortona737b3e2006-03-22 00:08:11 -08001331 start_cpu_timer(cpu);
Glauber Costaa164f8962012-06-21 00:59:18 +04001332
1333 /* Done! */
Christoph Lameter97d06602012-07-06 15:25:11 -05001334 slab_state = FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 return 0;
1336}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337__initcall(cpucache_init);
1338
Rafael Aquini8bdec192012-03-09 17:27:27 -03001339static noinline void
1340slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1341{
David Rientjes9a02d692014-06-04 16:06:36 -07001342#if DEBUG
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001343 struct kmem_cache_node *n;
Joonsoo Kim8456a642013-10-24 10:07:49 +09001344 struct page *page;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001345 unsigned long flags;
1346 int node;
David Rientjes9a02d692014-06-04 16:06:36 -07001347 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1348 DEFAULT_RATELIMIT_BURST);
1349
1350 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1351 return;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001352
Vlastimil Babka5b3810e2016-03-15 14:56:33 -07001353 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1354 nodeid, gfpflags, &gfpflags);
1355 pr_warn(" cache: %s, object size: %d, order: %d\n",
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05001356 cachep->name, cachep->size, cachep->gfporder);
Rafael Aquini8bdec192012-03-09 17:27:27 -03001357
Christoph Lameter18bf8542014-08-06 16:04:11 -07001358 for_each_kmem_cache_node(cachep, node, n) {
Rafael Aquini8bdec192012-03-09 17:27:27 -03001359 unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1360 unsigned long active_slabs = 0, num_slabs = 0;
1361
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001362 spin_lock_irqsave(&n->list_lock, flags);
Joonsoo Kim8456a642013-10-24 10:07:49 +09001363 list_for_each_entry(page, &n->slabs_full, lru) {
Rafael Aquini8bdec192012-03-09 17:27:27 -03001364 active_objs += cachep->num;
1365 active_slabs++;
1366 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09001367 list_for_each_entry(page, &n->slabs_partial, lru) {
1368 active_objs += page->active;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001369 active_slabs++;
1370 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09001371 list_for_each_entry(page, &n->slabs_free, lru)
Rafael Aquini8bdec192012-03-09 17:27:27 -03001372 num_slabs++;
1373
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00001374 free_objects += n->free_objects;
1375 spin_unlock_irqrestore(&n->list_lock, flags);
Rafael Aquini8bdec192012-03-09 17:27:27 -03001376
1377 num_slabs += active_slabs;
1378 num_objs = num_slabs * cachep->num;
Vlastimil Babka5b3810e2016-03-15 14:56:33 -07001379 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
Rafael Aquini8bdec192012-03-09 17:27:27 -03001380 node, active_slabs, num_slabs, active_objs, num_objs,
1381 free_objects);
1382 }
David Rientjes9a02d692014-06-04 16:06:36 -07001383#endif
Rafael Aquini8bdec192012-03-09 17:27:27 -03001384}
1385
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386/*
Wang Sheng-Hui8a7d9b42014-08-06 16:04:46 -07001387 * Interface to system's page allocator. No need to hold the
1388 * kmem_cache_node ->list_lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 *
1390 * If we requested DMA-able memory, we will get it. Even if we
1391 * did not request DMA-able memory, we might get it, but that
1392 * would be relatively rare and ignorable.
1393 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001394static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1395 int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396{
1397 struct page *page;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001398 int nr_pages;
Christoph Lameter765c4502006-09-27 01:50:08 -07001399
Glauber Costaa618e892012-06-14 16:17:21 +04001400 flags |= cachep->allocflags;
Mel Gormane12ba742007-10-16 01:25:52 -07001401 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1402 flags |= __GFP_RECLAIMABLE;
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001403
Vlastimil Babka96db8002015-09-08 15:03:50 -07001404 page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
Rafael Aquini8bdec192012-03-09 17:27:27 -03001405 if (!page) {
David Rientjes9a02d692014-06-04 16:06:36 -07001406 slab_out_of_memory(cachep, flags, nodeid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 return NULL;
Rafael Aquini8bdec192012-03-09 17:27:27 -03001408 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08001410 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1411 __free_pages(page, cachep->gfporder);
1412 return NULL;
1413 }
1414
Christoph Hellwige1b6aa62006-06-23 02:03:17 -07001415 nr_pages = (1 << cachep->gfporder);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
Christoph Lameter972d1a72006-09-25 23:31:51 -07001417 add_zone_page_state(page_zone(page),
1418 NR_SLAB_RECLAIMABLE, nr_pages);
1419 else
1420 add_zone_page_state(page_zone(page),
1421 NR_SLAB_UNRECLAIMABLE, nr_pages);
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07001422
Joonsoo Kima57a4982013-10-24 10:07:44 +09001423 __SetPageSlab(page);
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07001424 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1425 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
Joonsoo Kima57a4982013-10-24 10:07:44 +09001426 SetPageSlabPfmemalloc(page);
Mel Gorman072bb0a2012-07-31 16:43:58 -07001427
Vegard Nossumb1eeab62008-11-25 16:55:53 +01001428 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1429 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1430
1431 if (cachep->ctor)
1432 kmemcheck_mark_uninitialized_pages(page, nr_pages);
1433 else
1434 kmemcheck_mark_unallocated_pages(page, nr_pages);
1435 }
Pekka Enbergc175eea2008-05-09 20:35:53 +02001436
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001437 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438}
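/*
 * Note that cachep->allocflags always contains __GFP_COMP (set in
 * __kmem_cache_create() below), so a higher-order slab is allocated as a
 * single compound page.  The NR_SLAB_RECLAIMABLE/NR_SLAB_UNRECLAIMABLE
 * counters updated above are what show up in /proc/meminfo as
 * "SReclaimable" and "SUnreclaim".
 */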
1439
1440/*
1441 * Interface to system's page release.
1442 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001443static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444{
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07001445 int order = cachep->gfporder;
1446 unsigned long nr_freed = (1 << order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07001448 kmemcheck_free_shadow(page, order);
Pekka Enbergc175eea2008-05-09 20:35:53 +02001449
Christoph Lameter972d1a72006-09-25 23:31:51 -07001450 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1451 sub_zone_page_state(page_zone(page),
1452 NR_SLAB_RECLAIMABLE, nr_freed);
1453 else
1454 sub_zone_page_state(page_zone(page),
1455 NR_SLAB_UNRECLAIMABLE, nr_freed);
Joonsoo Kim73293c22013-10-24 10:07:37 +09001456
Joonsoo Kima57a4982013-10-24 10:07:44 +09001457 BUG_ON(!PageSlab(page));
Joonsoo Kim73293c22013-10-24 10:07:37 +09001458 __ClearPageSlabPfmemalloc(page);
Joonsoo Kima57a4982013-10-24 10:07:44 +09001459 __ClearPageSlab(page);
Joonsoo Kim8456a642013-10-24 10:07:49 +09001460 page_mapcount_reset(page);
1461 page->mapping = NULL;
Glauber Costa1f458cb2012-12-18 14:22:50 -08001462
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 if (current->reclaim_state)
1464 current->reclaim_state->reclaimed_slab += nr_freed;
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07001465 memcg_uncharge_slab(page, order, cachep);
1466 __free_pages(page, order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467}
1468
1469static void kmem_rcu_free(struct rcu_head *head)
1470{
Joonsoo Kim68126702013-10-24 10:07:42 +09001471 struct kmem_cache *cachep;
1472 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
Joonsoo Kim68126702013-10-24 10:07:42 +09001474 page = container_of(head, struct page, rcu_head);
1475 cachep = page->slab_cache;
1476
1477 kmem_freepages(cachep, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478}
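/*
 * kmem_rcu_free() is the call_rcu() callback used by slab_destroy() for
 * SLAB_DESTROY_BY_RCU caches: the slab's pages are only handed back to
 * the page allocator after a grace period, so readers that dereference
 * such objects under rcu_read_lock() never see the memory change type,
 * although the object itself may have been reused and must be
 * revalidated by the reader.
 */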
1479
1480#if DEBUG
Joonsoo Kim40b44132016-03-15 14:54:21 -07001481static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1482{
1483 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1484 (cachep->size % PAGE_SIZE) == 0)
1485 return true;
1486
1487 return false;
1488}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489
1490#ifdef CONFIG_DEBUG_PAGEALLOC
Pekka Enberg343e0d72006-02-01 03:05:50 -08001491static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001492 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493{
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001494 int size = cachep->object_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001496 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001498 if (size < 5 * sizeof(unsigned long))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 return;
1500
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001501 *addr++ = 0x12345678;
1502 *addr++ = caller;
1503 *addr++ = smp_processor_id();
1504 size -= 3 * sizeof(unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 {
1506 unsigned long *sptr = &caller;
1507 unsigned long svalue;
1508
1509 while (!kstack_end(sptr)) {
1510 svalue = *sptr++;
1511 if (kernel_text_address(svalue)) {
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001512 *addr++ = svalue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 size -= sizeof(unsigned long);
1514 if (size <= sizeof(unsigned long))
1515 break;
1516 }
1517 }
1518
1519 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001520 *addr++ = 0x87654321;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521}
Joonsoo Kim40b44132016-03-15 14:54:21 -07001522
1523static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1524 int map, unsigned long caller)
1525{
1526 if (!is_debug_pagealloc_cache(cachep))
1527 return;
1528
1529 if (caller)
1530 store_stackinfo(cachep, objp, caller);
1531
1532 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1533}
1534
1535#else
1536static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1537 int map, unsigned long caller) {}
1538
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539#endif
1540
Pekka Enberg343e0d72006-02-01 03:05:50 -08001541static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542{
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001543 int size = cachep->object_size;
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001544 addr = &((char *)addr)[obj_offset(cachep)];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545
1546 memset(addr, val, size);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001547 *(unsigned char *)(addr + size - 1) = POISON_END;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548}
1549
1550static void dump_line(char *data, int offset, int limit)
1551{
1552 int i;
Dave Jonesaa83aa42006-09-29 01:59:51 -07001553 unsigned char error = 0;
1554 int bad_count = 0;
1555
Joe Perches11705322016-03-17 14:19:50 -07001556 pr_err("%03x: ", offset);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001557 for (i = 0; i < limit; i++) {
1558 if (data[offset + i] != POISON_FREE) {
1559 error = data[offset + i];
1560 bad_count++;
1561 }
Dave Jonesaa83aa42006-09-29 01:59:51 -07001562 }
Sebastian Andrzej Siewiorfdde6ab2011-07-29 18:22:13 +02001563 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1564 &data[offset], limit, 1);
Dave Jonesaa83aa42006-09-29 01:59:51 -07001565
1566 if (bad_count == 1) {
1567 error ^= POISON_FREE;
1568 if (!(error & (error - 1))) {
Joe Perches11705322016-03-17 14:19:50 -07001569 pr_err("Single bit error detected. Probably bad RAM.\n");
Dave Jonesaa83aa42006-09-29 01:59:51 -07001570#ifdef CONFIG_X86
Joe Perches11705322016-03-17 14:19:50 -07001571 pr_err("Run memtest86+ or a similar memory test tool.\n");
Dave Jonesaa83aa42006-09-29 01:59:51 -07001572#else
Joe Perches11705322016-03-17 14:19:50 -07001573 pr_err("Run a memory test tool.\n");
Dave Jonesaa83aa42006-09-29 01:59:51 -07001574#endif
1575 }
1576 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577}
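/*
 * Worked example for the single-bit test above (assuming the usual
 * poison byte POISON_FREE == 0x6b): if exactly one byte in the dumped
 * line differs from the poison and reads back as 0x6a, then
 * error = 0x6a, error ^ 0x6b = 0x01, and (0x01 & (0x01 - 1)) == 0, so
 * the "Probably bad RAM" hint is printed.  A value such as 0x68 gives
 * 0x68 ^ 0x6b = 0x03, which is not a power of two and is treated as
 * ordinary corruption.
 */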
1578#endif
1579
1580#if DEBUG
1581
Pekka Enberg343e0d72006-02-01 03:05:50 -08001582static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583{
1584 int i, size;
1585 char *realobj;
1586
1587 if (cachep->flags & SLAB_RED_ZONE) {
Joe Perches11705322016-03-17 14:19:50 -07001588 pr_err("Redzone: 0x%llx/0x%llx\n",
1589 *dbg_redzone1(cachep, objp),
1590 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 }
1592
1593 if (cachep->flags & SLAB_STORE_USER) {
Joe Perches11705322016-03-17 14:19:50 -07001594 pr_err("Last user: [<%p>](%pSR)\n",
Joe Perches071361d2012-12-12 10:19:12 -08001595 *dbg_userword(cachep, objp),
1596 *dbg_userword(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 }
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001598 realobj = (char *)objp + obj_offset(cachep);
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001599 size = cachep->object_size;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001600 for (i = 0; i < size && lines; i += 16, lines--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 int limit;
1602 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001603 if (i + limit > size)
1604 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 dump_line(realobj, i, limit);
1606 }
1607}
1608
Pekka Enberg343e0d72006-02-01 03:05:50 -08001609static void check_poison_obj(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610{
1611 char *realobj;
1612 int size, i;
1613 int lines = 0;
1614
Joonsoo Kim40b44132016-03-15 14:54:21 -07001615 if (is_debug_pagealloc_cache(cachep))
1616 return;
1617
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001618 realobj = (char *)objp + obj_offset(cachep);
Christoph Lameter8c138bc2012-06-13 10:24:58 -05001619 size = cachep->object_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001621 for (i = 0; i < size; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 char exp = POISON_FREE;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001623 if (i == size - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 exp = POISON_END;
1625 if (realobj[i] != exp) {
1626 int limit;
1627 /* Mismatch ! */
1628 /* Print header */
1629 if (lines == 0) {
Joe Perches11705322016-03-17 14:19:50 -07001630 pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
1631 print_tainted(), cachep->name,
1632 realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 print_objinfo(cachep, objp, 0);
1634 }
1635 /* Hexdump the affected line */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001636 i = (i / 16) * 16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 limit = 16;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001638 if (i + limit > size)
1639 limit = size - i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 dump_line(realobj, i, limit);
1641 i += 16;
1642 lines++;
1643 /* Limit to 5 lines */
1644 if (lines > 5)
1645 break;
1646 }
1647 }
1648 if (lines != 0) {
1649 /* Print some data about the neighboring objects, if they
1650 * exist:
1651 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09001652 struct page *page = virt_to_head_page(objp);
Pekka Enberg8fea4e92006-03-22 00:08:10 -08001653 unsigned int objnr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
Joonsoo Kim8456a642013-10-24 10:07:49 +09001655 objnr = obj_to_index(cachep, page, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 if (objnr) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09001657 objp = index_to_obj(cachep, page, objnr - 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001658 realobj = (char *)objp + obj_offset(cachep);
Joe Perches11705322016-03-17 14:19:50 -07001659 pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 print_objinfo(cachep, objp, 2);
1661 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08001662 if (objnr + 1 < cachep->num) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09001663 objp = index_to_obj(cachep, page, objnr + 1);
Manfred Spraul3dafccf2006-02-01 03:05:42 -08001664 realobj = (char *)objp + obj_offset(cachep);
Joe Perches11705322016-03-17 14:19:50 -07001665 pr_err("Next obj: start=%p, len=%d\n", realobj, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 print_objinfo(cachep, objp, 2);
1667 }
1668 }
1669}
1670#endif
1671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672#if DEBUG
Joonsoo Kim8456a642013-10-24 10:07:49 +09001673static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1674 struct page *page)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001675{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 int i;
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07001677
1678 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1679 poison_obj(cachep, page->freelist - obj_offset(cachep),
1680 POISON_FREE);
1681 }
1682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 for (i = 0; i < cachep->num; i++) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09001684 void *objp = index_to_obj(cachep, page, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
1686 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 check_poison_obj(cachep, objp);
Joonsoo Kim40b44132016-03-15 14:54:21 -07001688 slab_kernel_map(cachep, objp, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 }
1690 if (cachep->flags & SLAB_RED_ZONE) {
1691 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
Joe Perches756a025f02016-03-17 14:19:47 -07001692 slab_error(cachep, "start of a freed object was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
Joe Perches756a025f02016-03-17 14:19:47 -07001694 slab_error(cachep, "end of a freed object was overwritten");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 }
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001697}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698#else
Joonsoo Kim8456a642013-10-24 10:07:49 +09001699static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1700 struct page *page)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001701{
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001702}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703#endif
1704
Randy Dunlap911851e2006-03-22 00:08:14 -08001705/**
1706 * slab_destroy - destroy and release all objects in a slab
1707 * @cachep: cache pointer being destroyed
Masanari Iidacb8ee1a2014-01-28 02:57:08 +09001708 * @page: page pointer being destroyed
Randy Dunlap911851e2006-03-22 00:08:14 -08001709 *
Wang Sheng-Hui8a7d9b42014-08-06 16:04:46 -07001710 * Destroy all the objs in a slab page, and release the mem back to the system.
1711 * Before calling, the slab page must have been unlinked from the cache. The
1712 * kmem_cache_node ->list_lock is not held/needed.
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001713 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09001714static void slab_destroy(struct kmem_cache *cachep, struct page *page)
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001715{
Joonsoo Kim7e007352013-10-30 19:04:01 +09001716 void *freelist;
Matthew Dobson12dd36f2006-02-01 03:05:46 -08001717
Joonsoo Kim8456a642013-10-24 10:07:49 +09001718 freelist = page->freelist;
1719 slab_destroy_debugcheck(cachep, page);
Kirill A. Shutemovbc4f6102015-11-06 16:29:44 -08001720 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
1721 call_rcu(&page->rcu_head, kmem_rcu_free);
1722 else
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09001723 kmem_freepages(cachep, page);
Joonsoo Kim68126702013-10-24 10:07:42 +09001724
1725 /*
Joonsoo Kim8456a642013-10-24 10:07:49 +09001726	 * From now on, we don't use the freelist,
Joonsoo Kim68126702013-10-24 10:07:42 +09001727	 * although the actual page can be freed in RCU context.
1728 */
1729 if (OFF_SLAB(cachep))
Joonsoo Kim8456a642013-10-24 10:07:49 +09001730 kmem_cache_free(cachep->freelist_cache, freelist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731}
1732
Joonsoo Kim97654df2014-08-06 16:04:25 -07001733static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1734{
1735 struct page *page, *n;
1736
1737 list_for_each_entry_safe(page, n, list, lru) {
1738 list_del(&page->lru);
1739 slab_destroy(cachep, page);
1740 }
1741}
1742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743/**
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001744 * calculate_slab_order - calculate size (page order) of slabs
1745 * @cachep: pointer to the cache that is being created
1746 * @size: size of objects to be created in this cache.
Randy.Dunlapa70773d2006-02-01 03:05:52 -08001747 * @flags: slab allocation flags
1748 *
1749 * Also calculates the number of objects per slab.
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001750 *
1751 * This could be made much more intelligent. For now, try to avoid using
1752 * high order pages for slabs. When the gfp() functions are more friendly
1753 * towards high-order requests, this should be changed.
1754 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001755static size_t calculate_slab_order(struct kmem_cache *cachep,
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07001756 size_t size, unsigned long flags)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001757{
1758 size_t left_over = 0;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001759 int gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001760
Christoph Lameter0aa817f2007-05-16 22:11:01 -07001761 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001762 unsigned int num;
1763 size_t remainder;
1764
Joonsoo Kim70f75062016-03-15 14:54:53 -07001765 num = cache_estimate(gfporder, size, flags, &remainder);
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001766 if (!num)
1767 continue;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001768
Joonsoo Kimf315e3f2013-12-02 17:49:41 +09001769 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1770 if (num > SLAB_OBJ_MAX_NUM)
1771 break;
1772
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001773 if (flags & CFLGS_OFF_SLAB) {
Joonsoo Kim3217fd92016-03-15 14:54:41 -07001774 struct kmem_cache *freelist_cache;
1775 size_t freelist_size;
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001776
Joonsoo Kim3217fd92016-03-15 14:54:41 -07001777 freelist_size = num * sizeof(freelist_idx_t);
1778 freelist_cache = kmalloc_slab(freelist_size, 0u);
1779 if (!freelist_cache)
1780 continue;
1781
1782 /*
1783 * Needed to avoid possible looping condition
1784 * in cache_grow()
1785 */
1786 if (OFF_SLAB(freelist_cache))
1787 continue;
1788
1789 /* check if off slab has enough benefit */
1790 if (freelist_cache->size > cachep->size / 2)
1791 continue;
Ingo Molnarb1ab41c2006-06-02 15:44:58 +02001792 }
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001793
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001794 /* Found something acceptable - save it away */
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001795 cachep->num = num;
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001796 cachep->gfporder = gfporder;
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001797 left_over = remainder;
1798
1799 /*
Linus Torvaldsf78bb8a2006-03-08 10:33:05 -08001800 * A VFS-reclaimable slab tends to have most allocations
1801 * as GFP_NOFS and we really don't want to have to be allocating
1802 * higher-order pages when we are unable to shrink dcache.
1803 */
1804 if (flags & SLAB_RECLAIM_ACCOUNT)
1805 break;
1806
1807 /*
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001808 * Large number of objects is good, but very large slabs are
1809 * currently bad for the gfp()s.
1810 */
David Rientjes543585c2011-10-18 22:09:24 -07001811 if (gfporder >= slab_max_order)
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001812 break;
1813
Linus Torvalds9888e6f2006-03-06 17:44:43 -08001814 /*
1815 * Acceptable internal fragmentation?
1816 */
Andrew Mortona737b3e2006-03-22 00:08:11 -08001817 if (left_over * 8 <= (PAGE_SIZE << gfporder))
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001818 break;
1819 }
1820 return left_over;
1821}
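/*
 * Worked example (assumes 4K pages and ignores the freelist and colouring
 * overhead accounted by cache_estimate()): for 1100-byte objects, order 0
 * fits 3 objects and wastes 4096 - 3*1100 = 796 bytes; 796 * 8 = 6368 is
 * more than 4096, so the fragmentation check fails and the loop tries
 * order 1, which fits 7 objects and wastes 8192 - 7*1100 = 492 bytes;
 * 492 * 8 = 3936 <= 8192, so order 1 is accepted (provided slab_max_order
 * and the earlier break conditions allow it).
 */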
1822
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001823static struct array_cache __percpu *alloc_kmem_cache_cpus(
1824 struct kmem_cache *cachep, int entries, int batchcount)
1825{
1826 int cpu;
1827 size_t size;
1828 struct array_cache __percpu *cpu_cache;
1829
1830 size = sizeof(void *) * entries + sizeof(struct array_cache);
Joonsoo Kim85c9f4b2014-10-13 15:51:01 -07001831 cpu_cache = __alloc_percpu(size, sizeof(void *));
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001832
1833 if (!cpu_cache)
1834 return NULL;
1835
1836 for_each_possible_cpu(cpu) {
1837 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1838 entries, batchcount);
1839 }
1840
1841 return cpu_cache;
1842}
1843
Pekka Enberg83b519e2009-06-10 19:40:04 +03001844static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001845{
Christoph Lameter97d06602012-07-06 15:25:11 -05001846 if (slab_state >= FULL)
Pekka Enberg83b519e2009-06-10 19:40:04 +03001847 return enable_cpucache(cachep, gfp);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07001848
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001849 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1850 if (!cachep->cpu_cache)
1851 return 1;
1852
Christoph Lameter97d06602012-07-06 15:25:11 -05001853 if (slab_state == DOWN) {
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001854 /* Creation of first cache (kmem_cache). */
1855 set_up_node(kmem_cache, CACHE_CACHE);
Christoph Lameter2f9baa92012-11-28 16:23:09 +00001856 } else if (slab_state == PARTIAL) {
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001857 /* For kmem_cache_node */
1858 set_up_node(cachep, SIZE_NODE);
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001859 } else {
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001860 int node;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001861
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001862 for_each_online_node(node) {
1863 cachep->node[node] = kmalloc_node(
1864 sizeof(struct kmem_cache_node), gfp, node);
1865 BUG_ON(!cachep->node[node]);
1866 kmem_cache_node_init(cachep->node[node]);
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001867 }
1868 }
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07001869
Christoph Lameter6a673682013-01-10 19:14:19 +00001870 cachep->node[numa_mem_id()]->next_reap =
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08001871 jiffies + REAPTIMEOUT_NODE +
1872 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001873
1874 cpu_cache_get(cachep)->avail = 0;
1875 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1876 cpu_cache_get(cachep)->batchcount = 1;
1877 cpu_cache_get(cachep)->touched = 0;
1878 cachep->batchcount = 1;
1879 cachep->limit = BOOT_CPUCACHE_ENTRIES;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07001880 return 0;
Pekka Enbergf30cf7d2006-03-22 00:08:11 -08001881}
1882
Joonsoo Kim12220de2014-10-09 15:26:24 -07001883unsigned long kmem_cache_flags(unsigned long object_size,
1884 unsigned long flags, const char *name,
1885 void (*ctor)(void *))
1886{
1887 return flags;
1888}
1889
1890struct kmem_cache *
1891__kmem_cache_alias(const char *name, size_t size, size_t align,
1892 unsigned long flags, void (*ctor)(void *))
1893{
1894 struct kmem_cache *cachep;
1895
1896 cachep = find_mergeable(size, align, flags, name, ctor);
1897 if (cachep) {
1898 cachep->refcount++;
1899
1900 /*
1901 * Adjust the object sizes so that we clear
1902 * the complete object on kzalloc.
1903 */
1904 cachep->object_size = max_t(int, cachep->object_size, size);
1905 }
1906 return cachep;
1907}
1908
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07001909static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1910 size_t size, unsigned long flags)
1911{
1912 size_t left;
1913
1914 cachep->num = 0;
1915
1916 if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
1917 return false;
1918
1919 left = calculate_slab_order(cachep, size,
1920 flags | CFLGS_OBJFREELIST_SLAB);
1921 if (!cachep->num)
1922 return false;
1923
1924 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1925 return false;
1926
1927 cachep->colour = left / cachep->colour_off;
1928
1929 return true;
1930}
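/*
 * An OBJFREELIST_SLAB cache keeps the freelist index array inside one of
 * the slab's own free objects.  That is why the checks above reject
 * caches with a constructor or SLAB_DESTROY_BY_RCU (freed objects in
 * such caches cannot have their contents overwritten) and require the
 * whole array, num * sizeof(freelist_idx_t), to fit within a single
 * object.
 */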
1931
Joonsoo Kim158e3192016-03-15 14:54:35 -07001932static bool set_off_slab_cache(struct kmem_cache *cachep,
1933 size_t size, unsigned long flags)
1934{
1935 size_t left;
1936
1937 cachep->num = 0;
1938
1939 /*
Joonsoo Kim3217fd92016-03-15 14:54:41 -07001940	 * Always use on-slab management when SLAB_NOLEAKTRACE is set,
1941	 * to avoid recursive calls into kmemleak.
Joonsoo Kim158e3192016-03-15 14:54:35 -07001942 */
Joonsoo Kim158e3192016-03-15 14:54:35 -07001943 if (flags & SLAB_NOLEAKTRACE)
1944 return false;
1945
1946 /*
1947	 * The size is large, so assume it is best to place the slab management
1948	 * object off-slab (this should allow better packing of objects).
1949 */
1950 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1951 if (!cachep->num)
1952 return false;
1953
1954 /*
1955 * If the slab has been placed off-slab, and we have enough space then
1956 * move it on-slab. This is at the expense of any extra colouring.
1957 */
1958 if (left >= cachep->num * sizeof(freelist_idx_t))
1959 return false;
1960
1961 cachep->colour = left / cachep->colour_off;
1962
1963 return true;
1964}
1965
1966static bool set_on_slab_cache(struct kmem_cache *cachep,
1967 size_t size, unsigned long flags)
1968{
1969 size_t left;
1970
1971 cachep->num = 0;
1972
1973 left = calculate_slab_order(cachep, size, flags);
1974 if (!cachep->num)
1975 return false;
1976
1977 cachep->colour = left / cachep->colour_off;
1978
1979 return true;
1980}
1981
Pekka Enberg4d268eb2006-01-08 01:00:36 -08001982/**
Christoph Lameter039363f2012-07-06 15:25:10 -05001983 * __kmem_cache_create - Create a cache.
Randy Dunlapa755b762012-11-06 17:10:10 -08001984 * @cachep: cache management descriptor
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 * @flags: SLAB flags
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 *
1987 * Returns 0 on success, or a negative errno on failure.
1988 * Cannot be called within an interrupt, but can be interrupted.
Paul Mundt20c2df82007-07-20 10:11:58 +09001989 * The @ctor is run when new pages are allocated by the cache.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 * The flags are
1992 *
1993 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1994 * to catch references to uninitialised memory.
1995 *
1996 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1997 * for buffer overruns.
1998 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2000 * cacheline. This can be beneficial if you're counting cycles as closely
2001 * as davem.
2002 */
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002003int
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002004__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
David Rientjesd4a5fca2014-09-25 16:05:20 -07002006 size_t ralign = BYTES_PER_WORD;
Pekka Enberg83b519e2009-06-10 19:40:04 +03002007 gfp_t gfp;
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002008 int err;
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002009 size_t size = cachep->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012#if FORCED_DEBUG
2013 /*
2014 * Enable redzoning and last user accounting, except for caches with
2015 * large objects, if the increased size would increase the object size
2016 * above the next power of two: caches with object sizes just above a
2017 * power of two have a significant amount of internal fragmentation.
2018 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002019 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2020 2 * sizeof(unsigned long long)))
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002021 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 if (!(flags & SLAB_DESTROY_BY_RCU))
2023 flags |= SLAB_POISON;
2024#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Andrew Mortona737b3e2006-03-22 00:08:11 -08002027 /*
2028 * Check that size is in terms of words. This is needed to avoid
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 * unaligned accesses for some archs when redzoning is used, and makes
2030 * sure any on-slab bufctls are also correctly aligned.
2031 */
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002032 if (size & (BYTES_PER_WORD - 1)) {
2033 size += (BYTES_PER_WORD - 1);
2034 size &= ~(BYTES_PER_WORD - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 }
2036
David Woodhouse87a927c2007-07-04 21:26:44 -04002037 if (flags & SLAB_RED_ZONE) {
2038 ralign = REDZONE_ALIGN;
2039 /* If redzoning, ensure that the second redzone is suitably
2040 * aligned, by adjusting the object size accordingly. */
2041 size += REDZONE_ALIGN - 1;
2042 size &= ~(REDZONE_ALIGN - 1);
2043 }
Pekka Enbergca5f9702006-09-25 23:31:25 -07002044
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002045 /* 3) caller mandated alignment */
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002046 if (ralign < cachep->align) {
2047 ralign = cachep->align;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
Pekka Enberg3ff84a72011-02-14 17:46:21 +02002049 /* disable debug if necessary */
2050 if (ralign > __alignof__(unsigned long long))
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002051 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002052 /*
Pekka Enbergca5f9702006-09-25 23:31:25 -07002053 * 4) Store it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 */
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002055 cachep->align = ralign;
Joonsoo Kim158e3192016-03-15 14:54:35 -07002056 cachep->colour_off = cache_line_size();
2057 /* Offset must be a multiple of the alignment. */
2058 if (cachep->colour_off < cachep->align)
2059 cachep->colour_off = cachep->align;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
Pekka Enberg83b519e2009-06-10 19:40:04 +03002061 if (slab_is_available())
2062 gfp = GFP_KERNEL;
2063 else
2064 gfp = GFP_NOWAIT;
2065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Pekka Enbergca5f9702006-09-25 23:31:25 -07002068 /*
2069 * Both debugging options require word-alignment which is calculated
2070 * into align above.
2071 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 if (flags & SLAB_RED_ZONE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 /* add space for red zone words */
Pekka Enberg3ff84a72011-02-14 17:46:21 +02002074 cachep->obj_offset += sizeof(unsigned long long);
2075 size += 2 * sizeof(unsigned long long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 }
2077 if (flags & SLAB_STORE_USER) {
Pekka Enbergca5f9702006-09-25 23:31:25 -07002078		/* user store requires one word of storage behind the end of
David Woodhouse87a927c2007-07-04 21:26:44 -04002079 * the real object. But if the second red zone needs to be
2080 * aligned to 64 bits, we must allow that much space.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 */
David Woodhouse87a927c2007-07-04 21:26:44 -04002082 if (flags & SLAB_RED_ZONE)
2083 size += REDZONE_ALIGN;
2084 else
2085 size += BYTES_PER_WORD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 }
Joonsoo Kim832a15d2016-03-15 14:54:33 -07002087#endif
2088
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07002089 kasan_cache_create(cachep, &size, &flags);
2090
Joonsoo Kim832a15d2016-03-15 14:54:33 -07002091 size = ALIGN(size, cachep->align);
2092 /*
2093	 * We should restrict the number of objects in a slab to implement a
2094	 * byte-sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2095 */
2096 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2097 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2098
2099#if DEBUG
Joonsoo Kim03a2d2a2015-10-01 15:36:54 -07002100 /*
2101	 * To activate debug pagealloc, off-slab management is a necessary
2102	 * requirement. In the early phase of initialization, the small sized slab
2103	 * caches are not initialized yet, so off-slab management is not possible.
2104	 * So, we need to check size >= 256. That guarantees that all the necessary
2105	 * small sized slab caches are initialized in the current initialization sequence.
2106 */
Joonsoo Kim40323272016-03-15 14:54:18 -07002107 if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
Joonsoo Kimf3a3c322016-03-15 14:54:38 -07002108 size >= 256 && cachep->object_size > cache_line_size()) {
2109 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2110 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2111
2112 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2113 flags |= CFLGS_OFF_SLAB;
2114 cachep->obj_offset += tmp_size - size;
2115 size = tmp_size;
2116 goto done;
2117 }
2118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 }
2120#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002122 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2123 flags |= CFLGS_OBJFREELIST_SLAB;
2124 goto done;
2125 }
2126
Joonsoo Kim158e3192016-03-15 14:54:35 -07002127 if (set_off_slab_cache(cachep, size, flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 flags |= CFLGS_OFF_SLAB;
Joonsoo Kim158e3192016-03-15 14:54:35 -07002129 goto done;
Joonsoo Kim832a15d2016-03-15 14:54:33 -07002130 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
Joonsoo Kim158e3192016-03-15 14:54:35 -07002132 if (set_on_slab_cache(cachep, size, flags))
2133 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134
Joonsoo Kim158e3192016-03-15 14:54:35 -07002135 return -E2BIG;
Christoph Lameter8a13a4c2012-09-04 23:18:33 +00002136
Joonsoo Kim158e3192016-03-15 14:54:35 -07002137done:
2138 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 cachep->flags = flags;
Joonsoo Kima57a4982013-10-24 10:07:44 +09002140 cachep->allocflags = __GFP_COMP;
Christoph Lameter4b51d662007-02-10 01:43:10 -08002141 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
Glauber Costaa618e892012-06-14 16:17:21 +04002142 cachep->allocflags |= GFP_DMA;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05002143 cachep->size = size;
Eric Dumazet6a2d7a92006-12-13 00:34:27 -08002144 cachep->reciprocal_buffer_size = reciprocal_value(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145
Joonsoo Kim40b44132016-03-15 14:54:21 -07002146#if DEBUG
2147 /*
2148 * If we're going to use the generic kernel_map_pages()
2149 * poisoning, then it's going to smash the contents of
2150 * the redzone and userword anyhow, so switch them off.
2151 */
2152 if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2153 (cachep->flags & SLAB_POISON) &&
2154 is_debug_pagealloc_cache(cachep))
2155 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2156#endif
2157
2158 if (OFF_SLAB(cachep)) {
Joonsoo Kim158e3192016-03-15 14:54:35 -07002159 cachep->freelist_cache =
2160 kmalloc_slab(cachep->freelist_size, 0u);
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002161 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002163 err = setup_cpu_cache(cachep, gfp);
2164 if (err) {
Dmitry Safonov52b4b952016-02-17 13:11:37 -08002165 __kmem_cache_release(cachep);
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002166 return err;
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07002167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
Christoph Lameter278b1bb2012-09-05 00:20:34 +00002169 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170}
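/*
 * Hypothetical usage sketch (not part of this file): __kmem_cache_create()
 * is normally reached via kmem_cache_create() in mm/slab_common.c, which
 * allocates the struct kmem_cache and then calls in here.  A typical
 * caller of the public API looks roughly like:
 *
 *	struct foo { int a; char buf[60]; };
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN | SLAB_POISON, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */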
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172#if DEBUG
2173static void check_irq_off(void)
2174{
2175 BUG_ON(!irqs_disabled());
2176}
2177
2178static void check_irq_on(void)
2179{
2180 BUG_ON(irqs_disabled());
2181}
2182
Pekka Enberg343e0d72006-02-01 03:05:50 -08002183static void check_spinlock_acquired(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184{
2185#ifdef CONFIG_SMP
2186 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002187 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188#endif
2189}
Christoph Lametere498be72005-09-09 13:03:32 -07002190
Pekka Enberg343e0d72006-02-01 03:05:50 -08002191static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
Christoph Lametere498be72005-09-09 13:03:32 -07002192{
2193#ifdef CONFIG_SMP
2194 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002195 assert_spin_locked(&get_node(cachep, node)->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07002196#endif
2197}
2198
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199#else
2200#define check_irq_off() do { } while(0)
2201#define check_irq_on() do { } while(0)
2202#define check_spinlock_acquired(x) do { } while(0)
Christoph Lametere498be72005-09-09 13:03:32 -07002203#define check_spinlock_acquired_node(x, y) do { } while(0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204#endif
2205
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002206static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
Christoph Lameteraab22072006-03-22 00:09:06 -08002207 struct array_cache *ac,
2208 int force, int node);
2209
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210static void do_drain(void *arg)
2211{
Andrew Mortona737b3e2006-03-22 00:08:11 -08002212 struct kmem_cache *cachep = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 struct array_cache *ac;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07002214 int node = numa_mem_id();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002215 struct kmem_cache_node *n;
Joonsoo Kim97654df2014-08-06 16:04:25 -07002216 LIST_HEAD(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
2218 check_irq_off();
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002219 ac = cpu_cache_get(cachep);
Christoph Lameter18bf8542014-08-06 16:04:11 -07002220 n = get_node(cachep, node);
2221 spin_lock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07002222 free_block(cachep, ac->entry, ac->avail, node, &list);
Christoph Lameter18bf8542014-08-06 16:04:11 -07002223 spin_unlock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07002224 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 ac->avail = 0;
2226}
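/*
 * do_drain() runs with interrupts off on every cpu via on_each_cpu() from
 * drain_cpu_caches().  Objects are given back under n->list_lock, and any
 * slab pages that free_block() decides to release are collected on the
 * local list and only destroyed by slabs_destroy() after the lock has
 * been dropped, so the page allocator is never called with the node's
 * list_lock held.
 */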
2227
Pekka Enberg343e0d72006-02-01 03:05:50 -08002228static void drain_cpu_caches(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229{
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002230 struct kmem_cache_node *n;
Christoph Lametere498be72005-09-09 13:03:32 -07002231 int node;
2232
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002233 on_each_cpu(do_drain, cachep, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 check_irq_on();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002235 for_each_kmem_cache_node(cachep, node, n)
2236 if (n->alien)
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002237 drain_alien_cache(cachep, n->alien);
Roland Dreiera4523a82006-05-15 11:41:00 -07002238
Christoph Lameter18bf8542014-08-06 16:04:11 -07002239 for_each_kmem_cache_node(cachep, node, n)
2240 drain_array(cachep, n, n->shared, 1, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241}
2242
Christoph Lametered11d9e2006-06-30 01:55:45 -07002243/*
2244 * Remove slabs from the list of free slabs.
2245 * Specify the number of slabs to drain in tofree.
2246 *
2247 * Returns the actual number of slabs released.
2248 */
2249static int drain_freelist(struct kmem_cache *cache,
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002250 struct kmem_cache_node *n, int tofree)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251{
Christoph Lametered11d9e2006-06-30 01:55:45 -07002252 struct list_head *p;
2253 int nr_freed;
Joonsoo Kim8456a642013-10-24 10:07:49 +09002254 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
Christoph Lametered11d9e2006-06-30 01:55:45 -07002256 nr_freed = 0;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002257 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002259 spin_lock_irq(&n->list_lock);
2260 p = n->slabs_free.prev;
2261 if (p == &n->slabs_free) {
2262 spin_unlock_irq(&n->list_lock);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002263 goto out;
2264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
Joonsoo Kim8456a642013-10-24 10:07:49 +09002266 page = list_entry(p, struct page, lru);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002267 list_del(&page->lru);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002268 /*
2269 * Safe to drop the lock. The slab is no longer linked
2270 * to the cache.
2271 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002272 n->free_objects -= cache->num;
2273 spin_unlock_irq(&n->list_lock);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002274 slab_destroy(cache, page);
Christoph Lametered11d9e2006-06-30 01:55:45 -07002275 nr_freed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 }
Christoph Lametered11d9e2006-06-30 01:55:45 -07002277out:
2278 return nr_freed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279}
2280
Vladimir Davydovd6e0b7f2015-02-12 14:59:47 -08002281int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
Christoph Lametere498be72005-09-09 13:03:32 -07002282{
Christoph Lameter18bf8542014-08-06 16:04:11 -07002283 int ret = 0;
2284 int node;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002285 struct kmem_cache_node *n;
Christoph Lametere498be72005-09-09 13:03:32 -07002286
2287 drain_cpu_caches(cachep);
2288
2289 check_irq_on();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002290 for_each_kmem_cache_node(cachep, node, n) {
Wanpeng Li0fa81032013-07-04 08:33:22 +08002291 drain_freelist(cachep, n, slabs_tofree(cachep, n));
Christoph Lametered11d9e2006-06-30 01:55:45 -07002292
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002293 ret += !list_empty(&n->slabs_full) ||
2294 !list_empty(&n->slabs_partial);
Christoph Lametere498be72005-09-09 13:03:32 -07002295 }
2296 return (ret ? 1 : 0);
2297}
2298
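/*
 * Illustrative sketch (not part of the original source): kmem_cache_shrink()
 * in mm/slab_common.c ends up in __kmem_cache_shrink() above, which drains
 * the per-cpu and shared arrays and then releases every completely free
 * slab.  A hypothetical caller that wants to trim a private cache might do
 * no more than this (my_obj_cache is an assumed name):
 */
#if 0	/* example only, not compiled */
static void my_driver_trim_cache(struct kmem_cache *my_obj_cache)
{
	/* Objects still in use stay where they are; only empty slabs go. */
	kmem_cache_shrink(my_obj_cache);
}
#endif
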
Christoph Lameter945cf2b2012-09-04 23:18:33 +00002299int __kmem_cache_shutdown(struct kmem_cache *cachep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300{
Dmitry Safonov52b4b952016-02-17 13:11:37 -08002301 return __kmem_cache_shrink(cachep, false);
2302}
2303
2304void __kmem_cache_release(struct kmem_cache *cachep)
2305{
Christoph Lameter12c36672012-09-04 23:38:33 +00002306 int i;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002307 struct kmem_cache_node *n;
Christoph Lameter12c36672012-09-04 23:38:33 +00002308
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07002309 free_percpu(cachep->cpu_cache);
Christoph Lameter12c36672012-09-04 23:38:33 +00002310
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002311 /* NUMA: free the node structures */
Christoph Lameter18bf8542014-08-06 16:04:11 -07002312 for_each_kmem_cache_node(cachep, i, n) {
2313 kfree(n->shared);
2314 free_alien_cache(n->alien);
2315 kfree(n);
2316 cachep->node[i] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002320/*
2321 * Get the memory for a slab management obj.
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08002322 *
 2323 * For a slab cache whose slab descriptor is off-slab, the
 2324 * slab descriptor can't come from the same cache that is being created.
 2325 * If it did, we would be deferring the creation of
 2326 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
 2327 * and we would eventually call down to __kmem_cache_create(), which
 2328 * in turn looks up the desired-size one in the kmalloc_{dma,}_caches.
2329 * This is a "chicken-and-egg" problem.
2330 *
2331 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2332 * which are all initialized during kmem_cache_init().
Ravikiran G Thirumalaie5ac9c52006-09-25 23:31:34 -07002333 */
Joonsoo Kim7e007352013-10-30 19:04:01 +09002334static void *alloc_slabmgmt(struct kmem_cache *cachep,
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002335 struct page *page, int colour_off,
2336 gfp_t local_flags, int nodeid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337{
Joonsoo Kim7e007352013-10-30 19:04:01 +09002338 void *freelist;
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002339 void *addr = page_address(page);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002340
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07002341 page->s_mem = addr + colour_off;
2342 page->active = 0;
2343
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002344 if (OBJFREELIST_SLAB(cachep))
2345 freelist = NULL;
2346 else if (OFF_SLAB(cachep)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 /* Slab management obj is off-slab. */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002348 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
Pekka Enberg8759ec52008-11-26 10:01:31 +02002349 local_flags, nodeid);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002350 if (!freelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 return NULL;
2352 } else {
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07002353		/* We will use the last bytes of the slab for the freelist */
2354 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2355 cachep->freelist_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 }
Joonsoo Kim2e6b3602016-03-15 14:54:30 -07002357
Joonsoo Kim8456a642013-10-24 10:07:49 +09002358 return freelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359}
2360
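/*
 * A worked example of the three freelist placements handled in
 * alloc_slabmgmt() above (the numbers are illustrative assumptions, not
 * taken from any particular cache):
 *
 *  - OBJFREELIST_SLAB: the index array is overlaid on a free object, so no
 *    extra space is reserved at all (page->freelist is set later, in
 *    cache_init_objs()).
 *  - OFF_SLAB: the index array is a separate allocation from
 *    cachep->freelist_cache.
 *  - otherwise: the index array occupies the last freelist_size bytes of
 *    the slab, after the colour offset and the objects.
 */
#if 0	/* example only, not compiled */
	/* e.g. an order-0 slab holding 16 objects, on-slab freelist;
	 * addr == page_address(page), as in alloc_slabmgmt() */
	size_t slab_bytes    = PAGE_SIZE << 0;		/* 4096 on many arches */
	size_t freelist_size = 16 * sizeof(freelist_idx_t);
	void *freelist       = addr + slab_bytes - freelist_size;
#endif
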
Joonsoo Kim7cc689732014-04-18 16:24:09 +09002361static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362{
Joonsoo Kima41adfa2013-12-02 17:49:42 +09002363 return ((freelist_idx_t *)page->freelist)[idx];
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002364}
2365
2366static inline void set_free_obj(struct page *page,
Joonsoo Kim7cc689732014-04-18 16:24:09 +09002367 unsigned int idx, freelist_idx_t val)
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002368{
Joonsoo Kima41adfa2013-12-02 17:49:42 +09002369 ((freelist_idx_t *)(page->freelist))[idx] = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370}
2371
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002372static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002374#if DEBUG
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 int i;
2376
2377 for (i = 0; i < cachep->num; i++) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09002378 void *objp = index_to_obj(cachep, page, i);
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002379
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 if (cachep->flags & SLAB_STORE_USER)
2381 *dbg_userword(cachep, objp) = NULL;
2382
2383 if (cachep->flags & SLAB_RED_ZONE) {
2384 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2385 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2386 }
2387 /*
Andrew Mortona737b3e2006-03-22 00:08:11 -08002388 * Constructors are not allowed to allocate memory from the same
2389 * cache which they are a constructor for. Otherwise, deadlock.
2390 * They must also be threaded.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 */
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07002392 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2393 kasan_unpoison_object_data(cachep,
2394 objp + obj_offset(cachep));
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002395 cachep->ctor(objp + obj_offset(cachep));
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07002396 kasan_poison_object_data(
2397 cachep, objp + obj_offset(cachep));
2398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
2400 if (cachep->flags & SLAB_RED_ZONE) {
2401 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
Joe Perches756a025f02016-03-17 14:19:47 -07002402 slab_error(cachep, "constructor overwrote the end of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
Joe Perches756a025f02016-03-17 14:19:47 -07002404 slab_error(cachep, "constructor overwrote the start of an object");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 }
Joonsoo Kim40b44132016-03-15 14:54:21 -07002406 /* need to poison the objs? */
2407 if (cachep->flags & SLAB_POISON) {
2408 poison_obj(cachep, objp, POISON_FREE);
2409 slab_kernel_map(cachep, objp, 0, 0);
2410 }
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412#endif
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002413}
2414
2415static void cache_init_objs(struct kmem_cache *cachep,
2416 struct page *page)
2417{
2418 int i;
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07002419 void *objp;
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002420
2421 cache_init_objs_debug(cachep, page);
2422
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002423 if (OBJFREELIST_SLAB(cachep)) {
2424 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2425 obj_offset(cachep);
2426 }
2427
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002428 for (i = 0; i < cachep->num; i++) {
2429 /* constructor could break poison info */
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07002430 if (DEBUG == 0 && cachep->ctor) {
2431 objp = index_to_obj(cachep, page, i);
2432 kasan_unpoison_object_data(cachep, objp);
2433 cachep->ctor(objp);
2434 kasan_poison_object_data(cachep, objp);
2435 }
Joonsoo Kim10b2e9e2016-03-15 14:54:47 -07002436
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002437 set_free_obj(page, i, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439}
2440
Pekka Enberg343e0d72006-02-01 03:05:50 -08002441static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442{
Christoph Lameter4b51d662007-02-10 01:43:10 -08002443 if (CONFIG_ZONE_DMA_FLAG) {
2444 if (flags & GFP_DMA)
Glauber Costaa618e892012-06-14 16:17:21 +04002445 BUG_ON(!(cachep->allocflags & GFP_DMA));
Christoph Lameter4b51d662007-02-10 01:43:10 -08002446 else
Glauber Costaa618e892012-06-14 16:17:21 +04002447 BUG_ON(cachep->allocflags & GFP_DMA);
Christoph Lameter4b51d662007-02-10 01:43:10 -08002448 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449}
2450
Joonsoo Kim260b61d2016-03-15 14:54:12 -07002451static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002452{
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09002453 void *objp;
Matthew Dobson78d382d2006-02-01 03:05:47 -08002454
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002455 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
Joonsoo Kim8456a642013-10-24 10:07:49 +09002456 page->active++;
Matthew Dobson78d382d2006-02-01 03:05:47 -08002457
Joonsoo Kimd31676d2016-03-15 14:54:24 -07002458#if DEBUG
2459 if (cachep->flags & SLAB_STORE_USER)
2460 set_store_user_dirty(cachep);
2461#endif
2462
Matthew Dobson78d382d2006-02-01 03:05:47 -08002463 return objp;
2464}
2465
Joonsoo Kim260b61d2016-03-15 14:54:12 -07002466static void slab_put_obj(struct kmem_cache *cachep,
2467 struct page *page, void *objp)
Matthew Dobson78d382d2006-02-01 03:05:47 -08002468{
Joonsoo Kim8456a642013-10-24 10:07:49 +09002469 unsigned int objnr = obj_to_index(cachep, page, objp);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002470#if DEBUG
Joonsoo Kim16025172013-10-24 10:07:46 +09002471 unsigned int i;
Matthew Dobson78d382d2006-02-01 03:05:47 -08002472
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09002473 /* Verify double free bug */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002474 for (i = page->active; i < cachep->num; i++) {
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002475 if (get_free_obj(page, i) == objnr) {
Joe Perches11705322016-03-17 14:19:50 -07002476 pr_err("slab: double free detected in cache '%s', objp %p\n",
Joe Perches756a025f02016-03-17 14:19:47 -07002477 cachep->name, objp);
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09002478 BUG();
2479 }
Matthew Dobson78d382d2006-02-01 03:05:47 -08002480 }
2481#endif
Joonsoo Kim8456a642013-10-24 10:07:49 +09002482 page->active--;
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002483 if (!page->freelist)
2484 page->freelist = objp + obj_offset(cachep);
2485
Joonsoo Kime5c58df2013-12-02 17:49:40 +09002486 set_free_obj(page, page->active, objnr);
Matthew Dobson78d382d2006-02-01 03:05:47 -08002487}
2488
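/*
 * The freelist manipulated by slab_get_obj()/slab_put_obj() behaves like a
 * small stack of object indices: page->active is the stack pointer and
 * entries [active..num-1] hold the indices of the free objects.  A
 * self-contained model in plain C (illustrative only, not kernel code):
 */
#if 0	/* example only, not compiled */
	unsigned char freelist[16];
	unsigned int active = 0, i, idx;

	for (i = 0; i < 16; i++)	/* cache_init_objs(): identity map  */
		freelist[i] = i;

	idx = freelist[active++];	/* slab_get_obj(): pop a free index */
	freelist[--active] = idx;	/* slab_put_obj(): push it back     */
#endif
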
Pekka Enberg47768742006-06-23 02:03:07 -07002489/*
2490 * Map pages beginning at addr to the given cache and slab. This is required
 2491 * for the slab allocator to be able to look up the cache and slab of a
Nick Pigginccd35fb2011-01-07 17:49:17 +11002492 * virtual address for kfree, ksize, and slab debugging.
Pekka Enberg47768742006-06-23 02:03:07 -07002493 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002494static void slab_map_pages(struct kmem_cache *cache, struct page *page,
Joonsoo Kim7e007352013-10-30 19:04:01 +09002495 void *freelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496{
Joonsoo Kima57a4982013-10-24 10:07:44 +09002497 page->slab_cache = cache;
Joonsoo Kim8456a642013-10-24 10:07:49 +09002498 page->freelist = freelist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499}
2500
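/*
 * Sketch of the reverse lookup that slab_map_pages() enables (illustrative
 * only): given just an object pointer, kfree()/ksize() and the debug checks
 * below recover the owning cache from the struct page, with no per-object
 * header needed.
 */
#if 0	/* example only, not compiled */
	struct page *pg = virt_to_head_page(objp);	/* head of the slab's pages */
	struct kmem_cache *owner = pg->slab_cache;	/* set by slab_map_pages()  */
#endif
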
2501/*
2502 * Grow (by 1) the number of slabs within a cache. This is called by
2503 * kmem_cache_alloc() when there are no active objs left in a cache.
2504 */
Christoph Lameter3c517a62006-12-06 20:33:29 -08002505static int cache_grow(struct kmem_cache *cachep,
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002506 gfp_t flags, int nodeid, struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507{
Joonsoo Kim7e007352013-10-30 19:04:01 +09002508 void *freelist;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002509 size_t offset;
2510 gfp_t local_flags;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002511 struct kmem_cache_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512
Andrew Mortona737b3e2006-03-22 00:08:11 -08002513 /*
2514 * Be lazy and only check for valid flags here, keeping it out of the
2515 * critical path in kmem_cache_alloc().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 */
Andrew Mortonc871ac42014-12-10 15:42:25 -08002517 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2518 pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
2519 BUG();
2520 }
Christoph Lameter6cb06222007-10-16 01:25:41 -07002521 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002523 /* Take the node list lock to change the colour_next on this node */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07002525 n = get_node(cachep, nodeid);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002526 spin_lock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527
 2528	/* Get colour for the slab, and calculate the next value. */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002529 offset = n->colour_next;
2530 n->colour_next++;
2531 if (n->colour_next >= cachep->colour)
2532 n->colour_next = 0;
2533 spin_unlock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534
Ravikiran G Thirumalai2e1217c2006-02-04 23:27:56 -08002535 offset *= cachep->colour_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
Mel Gormand0164ad2015-11-06 16:28:21 -08002537 if (gfpflags_allow_blocking(local_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 local_irq_enable();
2539
2540 /*
2541 * The test for missing atomic flag is performed here, rather than
2542 * the more obvious place, simply to reduce the critical path length
2543 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2544 * will eventually be caught here (where it matters).
2545 */
2546 kmem_flagcheck(cachep, flags);
2547
Andrew Mortona737b3e2006-03-22 00:08:11 -08002548 /*
2549 * Get mem for the objs. Attempt to allocate a physical page from
2550 * 'nodeid'.
Christoph Lametere498be72005-09-09 13:03:32 -07002551 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002552 if (!page)
2553 page = kmem_getpages(cachep, local_flags, nodeid);
2554 if (!page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 goto failed;
2556
2557 /* Get slab management. */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002558 freelist = alloc_slabmgmt(cachep, page, offset,
Christoph Lameter6cb06222007-10-16 01:25:41 -07002559 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002560 if (OFF_SLAB(cachep) && !freelist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 goto opps1;
2562
Joonsoo Kim8456a642013-10-24 10:07:49 +09002563 slab_map_pages(cachep, page, freelist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07002565 kasan_poison_slab(page);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002566 cache_init_objs(cachep, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
Mel Gormand0164ad2015-11-06 16:28:21 -08002568 if (gfpflags_allow_blocking(local_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 local_irq_disable();
2570 check_irq_off();
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002571 spin_lock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572
2573 /* Make slab active. */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002574 list_add_tail(&page->lru, &(n->slabs_free));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 STATS_INC_GROWN(cachep);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002576 n->free_objects += cachep->num;
2577 spin_unlock(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 return 1;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002579opps1:
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09002580 kmem_freepages(cachep, page);
Andrew Mortona737b3e2006-03-22 00:08:11 -08002581failed:
Mel Gormand0164ad2015-11-06 16:28:21 -08002582 if (gfpflags_allow_blocking(local_flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 local_irq_disable();
2584 return 0;
2585}
2586
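/*
 * Worked example of the slab colouring done in cache_grow() (the numbers
 * are assumptions for illustration): with colour_off == 64 and colour == 4,
 * successive slabs place their first object at offsets 0, 64, 128, 192,
 * 0, 64, ... so objects from different slabs don't all land on the same
 * cache lines.
 */
#if 0	/* example only, not compiled */
	unsigned int colour_next = 0, colour = 4, colour_off = 64;
	unsigned int slab_no, offset;

	for (slab_no = 0; slab_no < 6; slab_no++) {
		offset = colour_next * colour_off;	/* 0, 64, 128, 192, 0, 64 */
		if (++colour_next >= colour)
			colour_next = 0;
		/* objects of this slab start at page_address(page) + offset */
	}
#endif
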
2587#if DEBUG
2588
2589/*
2590 * Perform extra freeing checks:
2591 * - detect bad pointers.
2592 * - POISON/RED_ZONE checking
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 */
2594static void kfree_debugcheck(const void *objp)
2595{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 if (!virt_addr_valid(objp)) {
Joe Perches11705322016-03-17 14:19:50 -07002597 pr_err("kfree_debugcheck: out of range ptr %lxh\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002598 (unsigned long)objp);
2599 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601}
2602
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002603static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2604{
David Woodhouseb46b8f12007-05-08 00:22:59 -07002605 unsigned long long redzone1, redzone2;
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002606
2607 redzone1 = *dbg_redzone1(cache, obj);
2608 redzone2 = *dbg_redzone2(cache, obj);
2609
2610 /*
2611 * Redzone is ok.
2612 */
2613 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2614 return;
2615
2616 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2617 slab_error(cache, "double free detected");
2618 else
2619 slab_error(cache, "memory outside object was overwritten");
2620
Joe Perches11705322016-03-17 14:19:50 -07002621 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2622 obj, redzone1, redzone2);
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002623}
2624
Pekka Enberg343e0d72006-02-01 03:05:50 -08002625static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002626 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 unsigned int objnr;
Joonsoo Kim8456a642013-10-24 10:07:49 +09002629 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630
Matthew Wilcox80cbd912007-11-29 12:05:13 -07002631 BUG_ON(virt_to_cache(objp) != cachep);
2632
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002633 objp -= obj_offset(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 kfree_debugcheck(objp);
Christoph Lameterb49af682007-05-06 14:49:41 -07002635 page = virt_to_head_page(objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 if (cachep->flags & SLAB_RED_ZONE) {
Pekka Enberg58ce1fd2006-06-23 02:03:24 -07002638 verify_redzone_free(cachep, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2640 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2641 }
Joonsoo Kimd31676d2016-03-15 14:54:24 -07002642 if (cachep->flags & SLAB_STORE_USER) {
2643 set_store_user_dirty(cachep);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002644 *dbg_userword(cachep, objp) = (void *)caller;
Joonsoo Kimd31676d2016-03-15 14:54:24 -07002645 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646
Joonsoo Kim8456a642013-10-24 10:07:49 +09002647 objnr = obj_to_index(cachep, page, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
2649 BUG_ON(objnr >= cachep->num);
Joonsoo Kim8456a642013-10-24 10:07:49 +09002650 BUG_ON(objp != index_to_obj(cachep, page, objnr));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 poison_obj(cachep, objp, POISON_FREE);
Joonsoo Kim40b44132016-03-15 14:54:21 -07002654 slab_kernel_map(cachep, objp, 0, caller);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 }
2656 return objp;
2657}
2658
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659#else
2660#define kfree_debugcheck(x) do { } while(0)
2661#define cache_free_debugcheck(x,objp,z) (objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662#endif
2663
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002664static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2665 void **list)
2666{
2667#if DEBUG
2668 void *next = *list;
2669 void *objp;
2670
2671 while (next) {
2672 objp = next - obj_offset(cachep);
2673 next = *(void **)next;
2674 poison_obj(cachep, objp, POISON_FREE);
2675 }
2676#endif
2677}
2678
Joonsoo Kimd8410232016-03-15 14:54:44 -07002679static inline void fixup_slab_list(struct kmem_cache *cachep,
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002680 struct kmem_cache_node *n, struct page *page,
2681 void **list)
Joonsoo Kimd8410232016-03-15 14:54:44 -07002682{
 2683	/* move the slab to the correct slab list: */
2684 list_del(&page->lru);
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002685 if (page->active == cachep->num) {
Joonsoo Kimd8410232016-03-15 14:54:44 -07002686 list_add(&page->lru, &n->slabs_full);
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002687 if (OBJFREELIST_SLAB(cachep)) {
2688#if DEBUG
2689 /* Poisoning will be done without holding the lock */
2690 if (cachep->flags & SLAB_POISON) {
2691 void **objp = page->freelist;
2692
2693 *objp = *list;
2694 *list = objp;
2695 }
2696#endif
2697 page->freelist = NULL;
2698 }
2699 } else
Joonsoo Kimd8410232016-03-15 14:54:44 -07002700 list_add(&page->lru, &n->slabs_partial);
2701}
2702
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002703/* Try to find non-pfmemalloc slab if needed */
2704static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2705 struct page *page, bool pfmemalloc)
2706{
2707 if (!page)
2708 return NULL;
2709
2710 if (pfmemalloc)
2711 return page;
2712
2713 if (!PageSlabPfmemalloc(page))
2714 return page;
2715
2716 /* No need to keep pfmemalloc slab if we have enough free objects */
2717 if (n->free_objects > n->free_limit) {
2718 ClearPageSlabPfmemalloc(page);
2719 return page;
2720 }
2721
2722 /* Move pfmemalloc slab to the end of list to speed up next search */
2723 list_del(&page->lru);
2724 if (!page->active)
2725 list_add_tail(&page->lru, &n->slabs_free);
2726 else
2727 list_add_tail(&page->lru, &n->slabs_partial);
2728
2729 list_for_each_entry(page, &n->slabs_partial, lru) {
2730 if (!PageSlabPfmemalloc(page))
2731 return page;
2732 }
2733
2734 list_for_each_entry(page, &n->slabs_free, lru) {
2735 if (!PageSlabPfmemalloc(page))
2736 return page;
2737 }
2738
2739 return NULL;
2740}
2741
2742static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
Geliang Tang7aa0d222016-01-14 15:18:02 -08002743{
2744 struct page *page;
2745
2746 page = list_first_entry_or_null(&n->slabs_partial,
2747 struct page, lru);
2748 if (!page) {
2749 n->free_touched = 1;
2750 page = list_first_entry_or_null(&n->slabs_free,
2751 struct page, lru);
2752 }
2753
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002754 if (sk_memalloc_socks())
2755 return get_valid_first_slab(n, page, pfmemalloc);
2756
Geliang Tang7aa0d222016-01-14 15:18:02 -08002757 return page;
2758}
2759
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002760static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2761 struct kmem_cache_node *n, gfp_t flags)
2762{
2763 struct page *page;
2764 void *obj;
2765 void *list = NULL;
2766
2767 if (!gfp_pfmemalloc_allowed(flags))
2768 return NULL;
2769
2770 spin_lock(&n->list_lock);
2771 page = get_first_slab(n, true);
2772 if (!page) {
2773 spin_unlock(&n->list_lock);
2774 return NULL;
2775 }
2776
2777 obj = slab_get_obj(cachep, page);
2778 n->free_objects--;
2779
2780 fixup_slab_list(cachep, n, page, &list);
2781
2782 spin_unlock(&n->list_lock);
2783 fixup_objfreelist_debug(cachep, &list);
2784
2785 return obj;
2786}
2787
2788static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789{
2790 int batchcount;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002791 struct kmem_cache_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 struct array_cache *ac;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002793 int node;
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002794 void *list = NULL;
Pekka Enberg1ca4cb22006-10-06 00:43:52 -07002795
Joe Korty6d2144d2008-03-05 15:04:59 -08002796 check_irq_off();
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07002797 node = numa_mem_id();
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002798
Mel Gorman072bb0a2012-07-31 16:43:58 -07002799retry:
Joe Korty6d2144d2008-03-05 15:04:59 -08002800 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 batchcount = ac->batchcount;
2802 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002803 /*
2804 * If there was little recent activity on this cache, then
2805 * perform only a partial refill. Otherwise we could generate
2806 * refill bouncing.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 */
2808 batchcount = BATCHREFILL_LIMIT;
2809 }
Christoph Lameter18bf8542014-08-06 16:04:11 -07002810 n = get_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002812 BUG_ON(ac->avail > 0 || !n);
2813 spin_lock(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07002814
Christoph Lameter3ded1752006-03-25 03:06:44 -08002815 /* See if we can refill from the shared array */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002816 if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2817 n->shared->touched = 1;
Christoph Lameter3ded1752006-03-25 03:06:44 -08002818 goto alloc_done;
Nick Piggin44b57f12010-01-27 22:27:40 +11002819 }
Christoph Lameter3ded1752006-03-25 03:06:44 -08002820
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 while (batchcount > 0) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09002822 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823		/* Get the slab the allocation is to come from. */
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002824 page = get_first_slab(n, false);
Geliang Tang7aa0d222016-01-14 15:18:02 -08002825 if (!page)
2826 goto must_grow;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 check_spinlock_acquired(cachep);
Pekka Enberg714b81712007-05-06 14:49:03 -07002829
2830 /*
2831 * The slab was either on partial or free list so
2832 * there must be at least one object available for
2833 * allocation.
2834 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09002835 BUG_ON(page->active >= cachep->num);
Pekka Enberg714b81712007-05-06 14:49:03 -07002836
Joonsoo Kim8456a642013-10-24 10:07:49 +09002837 while (page->active < cachep->num && batchcount--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 STATS_INC_ALLOCED(cachep);
2839 STATS_INC_ACTIVE(cachep);
2840 STATS_SET_HIGH(cachep);
2841
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002842 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002845 fixup_slab_list(cachep, n, page, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 }
2847
Andrew Mortona737b3e2006-03-22 00:08:11 -08002848must_grow:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002849 n->free_objects -= ac->avail;
Andrew Mortona737b3e2006-03-22 00:08:11 -08002850alloc_done:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00002851 spin_unlock(&n->list_lock);
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07002852 fixup_objfreelist_debug(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853
2854 if (unlikely(!ac->avail)) {
2855 int x;
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002856
2857 /* Check if we can use obj in pfmemalloc slab */
2858 if (sk_memalloc_socks()) {
2859 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2860
2861 if (obj)
2862 return obj;
2863 }
2864
David Rientjes4167e9b2015-04-14 15:46:55 -07002865 x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
Christoph Lametere498be72005-09-09 13:03:32 -07002866
Andrew Mortona737b3e2006-03-22 00:08:11 -08002867 /* cache_grow can reenable interrupts, then ac could change. */
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002868 ac = cpu_cache_get(cachep);
David Rientjes51cd8e62012-08-28 19:57:21 -07002869 node = numa_mem_id();
Mel Gorman072bb0a2012-07-31 16:43:58 -07002870
2871 /* no objects in sight? abort */
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002872 if (!x && ac->avail == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 return NULL;
2874
Andrew Mortona737b3e2006-03-22 00:08:11 -08002875 if (!ac->avail) /* objects refilled by interrupt? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 goto retry;
2877 }
2878 ac->touched = 1;
Mel Gorman072bb0a2012-07-31 16:43:58 -07002879
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002880 return ac->entry[--ac->avail];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881}
2882
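/*
 * Conceptual model of the fast path that cache_alloc_refill() feeds
 * (illustrative only; the struct and sizes are made up): each CPU keeps a
 * small LIFO array of object pointers, ordinary allocations just pop from
 * it, and only when it runs dry do we take the node's list_lock and pull in
 * up to batchcount objects at once.
 */
#if 0	/* example only, not compiled */
struct toy_cpu_cache {
	unsigned int avail;		/* entries currently cached     */
	unsigned int batchcount;	/* objects pulled in per refill */
	void *entry[32];		/* LIFO: last freed, first out  */
};

static void *toy_alloc(struct toy_cpu_cache *ac)
{
	if (ac->avail)
		return ac->entry[--ac->avail];	/* hit: no locks taken */
	/*
	 * Miss: refill entry[0..batchcount-1] from the node's slab lists
	 * under n->list_lock, set avail = batchcount, then retry the pop.
	 */
	return NULL;
}
#endif
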
Andrew Mortona737b3e2006-03-22 00:08:11 -08002883static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2884 gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885{
Mel Gormand0164ad2015-11-06 16:28:21 -08002886 might_sleep_if(gfpflags_allow_blocking(flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887#if DEBUG
2888 kmem_flagcheck(cachep, flags);
2889#endif
2890}
2891
2892#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08002893static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002894 gfp_t flags, void *objp, unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002896 if (!objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 return objp;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002898 if (cachep->flags & SLAB_POISON) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 check_poison_obj(cachep, objp);
Joonsoo Kim40b44132016-03-15 14:54:21 -07002900 slab_kernel_map(cachep, objp, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 poison_obj(cachep, objp, POISON_INUSE);
2902 }
2903 if (cachep->flags & SLAB_STORE_USER)
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03002904 *dbg_userword(cachep, objp) = (void *)caller;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905
2906 if (cachep->flags & SLAB_RED_ZONE) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08002907 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2908 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
Joe Perches756a025f02016-03-17 14:19:47 -07002909 slab_error(cachep, "double free, or memory outside object was overwritten");
Joe Perches11705322016-03-17 14:19:50 -07002910 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2911 objp, *dbg_redzone1(cachep, objp),
2912 *dbg_redzone2(cachep, objp));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 }
2914 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2915 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2916 }
Joonsoo Kim03787302014-06-23 13:22:06 -07002917
Manfred Spraul3dafccf2006-02-01 03:05:42 -08002918 objp += obj_offset(cachep);
Christoph Lameter4f104932007-05-06 14:50:17 -07002919 if (cachep->ctor && cachep->flags & SLAB_POISON)
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07002920 cachep->ctor(objp);
Tetsuo Handa7ea466f2011-07-21 09:42:45 +09002921 if (ARCH_SLAB_MINALIGN &&
2922 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
Joe Perches11705322016-03-17 14:19:50 -07002923 pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
Hugh Dickinsc2251502011-07-11 13:35:08 -07002924 objp, (int)ARCH_SLAB_MINALIGN);
Kevin Hilmana44b56d2006-12-06 20:32:11 -08002925 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 return objp;
2927}
2928#else
2929#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2930#endif
2931
Pekka Enberg343e0d72006-02-01 03:05:50 -08002932static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08002934 void *objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 struct array_cache *ac;
2936
Alok N Kataria5c382302005-09-27 21:45:46 -07002937 check_irq_off();
Akinobu Mita8a8b6502006-12-08 02:39:44 -08002938
Pekka Enberg9a2dba42006-02-01 03:05:49 -08002939 ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 if (likely(ac->avail)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 ac->touched = 1;
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002942 objp = ac->entry[--ac->avail];
Mel Gorman072bb0a2012-07-31 16:43:58 -07002943
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002944 STATS_INC_ALLOCHIT(cachep);
2945 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 }
Mel Gorman072bb0a2012-07-31 16:43:58 -07002947
2948 STATS_INC_ALLOCMISS(cachep);
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07002949 objp = cache_alloc_refill(cachep, flags);
Mel Gorman072bb0a2012-07-31 16:43:58 -07002950 /*
2951 * the 'ac' may be updated by cache_alloc_refill(),
2952 * and kmemleak_erase() requires its correct value.
2953 */
2954 ac = cpu_cache_get(cachep);
2955
2956out:
Catalin Marinasd5cff632009-06-11 13:22:40 +01002957 /*
2958 * To avoid a false negative, if an object that is in one of the
2959 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
2960 * treat the array pointers as a reference to the object.
2961 */
J. R. Okajimaf3d8b532009-12-02 16:55:49 +09002962 if (objp)
2963 kmemleak_erase(&ac->entry[ac->avail]);
Alok N Kataria5c382302005-09-27 21:45:46 -07002964 return objp;
2965}
2966
Christoph Lametere498be72005-09-09 13:03:32 -07002967#ifdef CONFIG_NUMA
2968/*
Zefan Li2ad654b2014-09-25 09:41:02 +08002969 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
Paul Jacksonc61afb12006-03-24 03:16:08 -08002970 *
2971 * If we are in_interrupt, then process context, including cpusets and
2972 * mempolicy, may not apply and should not be used for allocation policy.
2973 */
2974static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2975{
2976 int nid_alloc, nid_here;
2977
Christoph Lameter765c4502006-09-27 01:50:08 -07002978 if (in_interrupt() || (flags & __GFP_THISNODE))
Paul Jacksonc61afb12006-03-24 03:16:08 -08002979 return NULL;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07002980 nid_alloc = nid_here = numa_mem_id();
Paul Jacksonc61afb12006-03-24 03:16:08 -08002981 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
Jack Steiner6adef3e2010-05-26 14:42:49 -07002982 nid_alloc = cpuset_slab_spread_node();
Paul Jacksonc61afb12006-03-24 03:16:08 -08002983 else if (current->mempolicy)
David Rientjes2a389612014-04-07 15:37:29 -07002984 nid_alloc = mempolicy_slab_node();
Paul Jacksonc61afb12006-03-24 03:16:08 -08002985 if (nid_alloc != nid_here)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08002986 return ____cache_alloc_node(cachep, flags, nid_alloc);
Paul Jacksonc61afb12006-03-24 03:16:08 -08002987 return NULL;
2988}
2989
2990/*
Christoph Lameter765c4502006-09-27 01:50:08 -07002991 * Fallback function if there was no memory available and no objects on a
Christoph Lameter3c517a62006-12-06 20:33:29 -08002992 * certain node and fallback is permitted. First we scan all the
Christoph Lameter6a673682013-01-10 19:14:19 +00002993 * available nodes for available objects. If that fails then we
Christoph Lameter3c517a62006-12-06 20:33:29 -08002994 * perform an allocation without specifying a node. This allows the page
2995 * allocator to do its reclaim / fallback magic. We then insert the
2996 * slab into the proper nodelist and then allocate from it.
Christoph Lameter765c4502006-09-27 01:50:08 -07002997 */
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08002998static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
Christoph Lameter765c4502006-09-27 01:50:08 -07002999{
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003000 struct zonelist *zonelist;
3001 gfp_t local_flags;
Mel Gormandd1a2392008-04-28 02:12:17 -07003002 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07003003 struct zone *zone;
3004 enum zone_type high_zoneidx = gfp_zone(flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07003005 void *obj = NULL;
Christoph Lameter3c517a62006-12-06 20:33:29 -08003006 int nid;
Mel Gormancc9a6c82012-03-21 16:34:11 -07003007 unsigned int cpuset_mems_cookie;
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003008
3009 if (flags & __GFP_THISNODE)
3010 return NULL;
3011
Christoph Lameter6cb06222007-10-16 01:25:41 -07003012 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
Christoph Lameter765c4502006-09-27 01:50:08 -07003013
Mel Gormancc9a6c82012-03-21 16:34:11 -07003014retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07003015 cpuset_mems_cookie = read_mems_allowed_begin();
David Rientjes2a389612014-04-07 15:37:29 -07003016 zonelist = node_zonelist(mempolicy_slab_node(), flags);
Mel Gormancc9a6c82012-03-21 16:34:11 -07003017
Christoph Lameter3c517a62006-12-06 20:33:29 -08003018retry:
3019 /*
3020 * Look through allowed nodes for objects available
3021 * from existing per node queues.
3022 */
Mel Gorman54a6eb52008-04-28 02:12:16 -07003023 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3024 nid = zone_to_nid(zone);
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003025
Vladimir Davydov061d7072014-12-12 16:58:25 -08003026 if (cpuset_zone_allowed(zone, flags) &&
Christoph Lameter18bf8542014-08-06 16:04:11 -07003027 get_node(cache, nid) &&
3028 get_node(cache, nid)->free_objects) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003029 obj = ____cache_alloc_node(cache,
David Rientjes4167e9b2015-04-14 15:46:55 -07003030 gfp_exact_node(flags), nid);
Christoph Lameter481c5342008-06-21 16:46:35 -07003031 if (obj)
3032 break;
3033 }
Christoph Lameter3c517a62006-12-06 20:33:29 -08003034 }
3035
Christoph Lametercfce6602007-05-06 14:50:17 -07003036 if (!obj) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003037 /*
3038 * This allocation will be performed within the constraints
3039 * of the current cpuset / memory policy requirements.
3040 * We may trigger various forms of reclaim on the allowed
3041 * set and go into memory reserves if necessary.
3042 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003043 struct page *page;
3044
Mel Gormand0164ad2015-11-06 16:28:21 -08003045 if (gfpflags_allow_blocking(local_flags))
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003046 local_irq_enable();
3047 kmem_flagcheck(cache, flags);
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003048 page = kmem_getpages(cache, local_flags, numa_mem_id());
Mel Gormand0164ad2015-11-06 16:28:21 -08003049 if (gfpflags_allow_blocking(local_flags))
Christoph Lameterdd47ea72006-12-13 00:34:11 -08003050 local_irq_disable();
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003051 if (page) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003052 /*
3053 * Insert into the appropriate per node queues
3054 */
Joonsoo Kim0c3aa832013-10-24 10:07:38 +09003055 nid = page_to_nid(page);
3056 if (cache_grow(cache, flags, nid, page)) {
Christoph Lameter3c517a62006-12-06 20:33:29 -08003057 obj = ____cache_alloc_node(cache,
David Rientjes4167e9b2015-04-14 15:46:55 -07003058 gfp_exact_node(flags), nid);
Christoph Lameter3c517a62006-12-06 20:33:29 -08003059 if (!obj)
3060 /*
3061 * Another processor may allocate the
3062 * objects in the slab since we are
3063 * not holding any locks.
3064 */
3065 goto retry;
3066 } else {
Hugh Dickinsb6a60452007-01-05 16:36:36 -08003067 /* cache_grow already freed obj */
Christoph Lameter3c517a62006-12-06 20:33:29 -08003068 obj = NULL;
3069 }
3070 }
Christoph Lameteraedb0eb2006-10-21 10:24:16 -07003071 }
Mel Gormancc9a6c82012-03-21 16:34:11 -07003072
Mel Gormand26914d2014-04-03 14:47:24 -07003073 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07003074 goto retry_cpuset;
Christoph Lameter765c4502006-09-27 01:50:08 -07003075 return obj;
3076}
3077
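/*
 * Summary of the fallback order implemented above: first try every allowed
 * node that already has free objects on its queues; failing that, ask the
 * page allocator for a fresh slab (which may reclaim and dip into
 * reserves), add it on whichever node the page landed on, and retry the
 * per-node allocation there.  Only if all of that fails is NULL returned.
 */
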
3078/*
Christoph Lametere498be72005-09-09 13:03:32 -07003079 * An interface to enable slab creation on nodeid
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003081static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
Andrew Mortona737b3e2006-03-22 00:08:11 -08003082 int nodeid)
Christoph Lametere498be72005-09-09 13:03:32 -07003083{
Joonsoo Kim8456a642013-10-24 10:07:49 +09003084 struct page *page;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003085 struct kmem_cache_node *n;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003086 void *obj;
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07003087 void *list = NULL;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003088 int x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089
Paul Mackerras7c3fbbd2014-12-02 15:59:48 -08003090 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003091 n = get_node(cachep, nodeid);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003092 BUG_ON(!n);
Christoph Lametere498be72005-09-09 13:03:32 -07003093
Andrew Mortona737b3e2006-03-22 00:08:11 -08003094retry:
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08003095 check_irq_off();
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003096 spin_lock(&n->list_lock);
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07003097 page = get_first_slab(n, false);
Geliang Tang7aa0d222016-01-14 15:18:02 -08003098 if (!page)
3099 goto must_grow;
Christoph Lametere498be72005-09-09 13:03:32 -07003100
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003101 check_spinlock_acquired_node(cachep, nodeid);
Christoph Lametere498be72005-09-09 13:03:32 -07003102
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003103 STATS_INC_NODEALLOCS(cachep);
3104 STATS_INC_ACTIVE(cachep);
3105 STATS_SET_HIGH(cachep);
Christoph Lametere498be72005-09-09 13:03:32 -07003106
Joonsoo Kim8456a642013-10-24 10:07:49 +09003107 BUG_ON(page->active == cachep->num);
Christoph Lametere498be72005-09-09 13:03:32 -07003108
Joonsoo Kim260b61d2016-03-15 14:54:12 -07003109 obj = slab_get_obj(cachep, page);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003110 n->free_objects--;
Christoph Lametere498be72005-09-09 13:03:32 -07003111
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07003112 fixup_slab_list(cachep, n, page, &list);
Christoph Lametere498be72005-09-09 13:03:32 -07003113
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003114 spin_unlock(&n->list_lock);
Joonsoo Kimb03a017b2016-03-15 14:54:50 -07003115 fixup_objfreelist_debug(cachep, &list);
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003116 goto done;
Christoph Lametere498be72005-09-09 13:03:32 -07003117
Andrew Mortona737b3e2006-03-22 00:08:11 -08003118must_grow:
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003119 spin_unlock(&n->list_lock);
David Rientjes4167e9b2015-04-14 15:46:55 -07003120 x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
Christoph Lameter765c4502006-09-27 01:50:08 -07003121 if (x)
3122 goto retry;
Christoph Lametere498be72005-09-09 13:03:32 -07003123
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003124 return fallback_alloc(cachep, flags);
Christoph Lameter765c4502006-09-27 01:50:08 -07003125
Andrew Mortona737b3e2006-03-22 00:08:11 -08003126done:
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003127 return obj;
Christoph Lametere498be72005-09-09 13:03:32 -07003128}
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003129
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003130static __always_inline void *
Ezequiel Garcia48356302012-09-08 17:47:57 -03003131slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003132 unsigned long caller)
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003133{
3134 unsigned long save_flags;
3135 void *ptr;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003136 int slab_node = numa_mem_id();
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003137
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003138 flags &= gfp_allowed_mask;
Jesper Dangaard Brouer011ecea2016-03-15 14:53:41 -07003139 cachep = slab_pre_alloc_hook(cachep, flags);
3140 if (unlikely(!cachep))
Akinobu Mita824ebef2007-05-06 14:49:58 -07003141 return NULL;
3142
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003143 cache_alloc_debugcheck_before(cachep, flags);
3144 local_irq_save(save_flags);
3145
Andrew Mortoneacbbae2011-07-28 13:59:49 -07003146 if (nodeid == NUMA_NO_NODE)
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003147 nodeid = slab_node;
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003148
Christoph Lameter18bf8542014-08-06 16:04:11 -07003149 if (unlikely(!get_node(cachep, nodeid))) {
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003150 /* Node not bootstrapped yet */
3151 ptr = fallback_alloc(cachep, flags);
3152 goto out;
3153 }
3154
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003155 if (nodeid == slab_node) {
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003156 /*
3157 * Use the locally cached objects if possible.
3158 * However ____cache_alloc does not allow fallback
3159 * to other nodes. It may fail while we still have
3160 * objects on other nodes available.
3161 */
3162 ptr = ____cache_alloc(cachep, flags);
3163 if (ptr)
3164 goto out;
3165 }
 3166	/* ____cache_alloc_node can fall back to other nodes */
3167 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3168 out:
3169 local_irq_restore(save_flags);
3170 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3171
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003172 if (unlikely(flags & __GFP_ZERO) && ptr)
3173 memset(ptr, 0, cachep->object_size);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003174
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003175 slab_post_alloc_hook(cachep, flags, 1, &ptr);
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003176 return ptr;
3177}
3178
3179static __always_inline void *
3180__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3181{
3182 void *objp;
3183
Zefan Li2ad654b2014-09-25 09:41:02 +08003184 if (current->mempolicy || cpuset_do_slab_mem_spread()) {
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003185 objp = alternate_node_alloc(cache, flags);
3186 if (objp)
3187 goto out;
3188 }
3189 objp = ____cache_alloc(cache, flags);
3190
3191 /*
3192 * We may just have run out of memory on the local node.
3193 * ____cache_alloc_node() knows how to locate memory on other nodes
3194 */
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003195 if (!objp)
3196 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003197
3198 out:
3199 return objp;
3200}
3201#else
3202
3203static __always_inline void *
3204__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3205{
3206 return ____cache_alloc(cachep, flags);
3207}
3208
3209#endif /* CONFIG_NUMA */
3210
3211static __always_inline void *
Ezequiel Garcia48356302012-09-08 17:47:57 -03003212slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003213{
3214 unsigned long save_flags;
3215 void *objp;
3216
Benjamin Herrenschmidtdcce2842009-06-18 13:24:12 +10003217 flags &= gfp_allowed_mask;
Jesper Dangaard Brouer011ecea2016-03-15 14:53:41 -07003218 cachep = slab_pre_alloc_hook(cachep, flags);
3219 if (unlikely(!cachep))
Akinobu Mita824ebef2007-05-06 14:49:58 -07003220 return NULL;
3221
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003222 cache_alloc_debugcheck_before(cachep, flags);
3223 local_irq_save(save_flags);
3224 objp = __do_cache_alloc(cachep, flags);
3225 local_irq_restore(save_flags);
3226 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3227 prefetchw(objp);
3228
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003229 if (unlikely(flags & __GFP_ZERO) && objp)
3230 memset(objp, 0, cachep->object_size);
Christoph Lameterd07dbea2007-07-17 04:03:23 -07003231
Jesper Dangaard Brouerd5e3ed62016-03-15 14:53:47 -07003232 slab_post_alloc_hook(cachep, flags, 1, &objp);
Pekka Enberg8c8cc2c2007-02-10 01:42:53 -08003233 return objp;
3234}
Christoph Lametere498be72005-09-09 13:03:32 -07003235
3236/*
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003237 * Caller must hold the correct kmem_cache_node's list_lock.
Joonsoo Kim97654df2014-08-06 16:04:25 -07003238 * @list: list of detached free slabs, to be freed by the caller
Christoph Lametere498be72005-09-09 13:03:32 -07003239 */
Joonsoo Kim97654df2014-08-06 16:04:25 -07003240static void free_block(struct kmem_cache *cachep, void **objpp,
3241 int nr_objects, int node, struct list_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242{
3243 int i;
Joonsoo Kim25c063f2014-08-06 16:04:22 -07003244 struct kmem_cache_node *n = get_node(cachep, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245
3246 for (i = 0; i < nr_objects; i++) {
Mel Gorman072bb0a2012-07-31 16:43:58 -07003247 void *objp;
Joonsoo Kim8456a642013-10-24 10:07:49 +09003248 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249
Mel Gorman072bb0a2012-07-31 16:43:58 -07003250 objp = objpp[i];
3251
Joonsoo Kim8456a642013-10-24 10:07:49 +09003252 page = virt_to_head_page(objp);
Joonsoo Kim8456a642013-10-24 10:07:49 +09003253 list_del(&page->lru);
Christoph Lameterff694162005-09-22 21:44:02 -07003254 check_spinlock_acquired_node(cachep, node);
Joonsoo Kim260b61d2016-03-15 14:54:12 -07003255 slab_put_obj(cachep, page, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 STATS_DEC_ACTIVE(cachep);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003257 n->free_objects++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258
3259 /* fixup slab chains */
Joonsoo Kim8456a642013-10-24 10:07:49 +09003260 if (page->active == 0) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003261 if (n->free_objects > n->free_limit) {
3262 n->free_objects -= cachep->num;
Joonsoo Kim97654df2014-08-06 16:04:25 -07003263 list_add_tail(&page->lru, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 } else {
Joonsoo Kim8456a642013-10-24 10:07:49 +09003265 list_add(&page->lru, &n->slabs_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 }
3267 } else {
3268 /* Unconditionally move a slab to the end of the
 3269			 * partial list on free - this gives the other
 3270			 * objects in the slab the maximum time to be freed, too.
3271 */
Joonsoo Kim8456a642013-10-24 10:07:49 +09003272 list_add_tail(&page->lru, &n->slabs_partial);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273 }
3274 }
3275}
3276
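/*
 * Typical calling pattern for free_block() in this file (compare do_drain()
 * above and cache_flusharray() below): completely free slabs that exceed
 * free_limit are detached onto a private list while the node's list_lock is
 * held, and destroyed only after the lock is dropped.
 */
#if 0	/* example only, not compiled */
	LIST_HEAD(list);

	spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);	/* page freeing happens without the lock */
#endif
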
Pekka Enberg343e0d72006-02-01 03:05:50 -08003277static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278{
3279 int batchcount;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003280 struct kmem_cache_node *n;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003281 int node = numa_mem_id();
Joonsoo Kim97654df2014-08-06 16:04:25 -07003282 LIST_HEAD(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
3284 batchcount = ac->batchcount;
Joonsoo Kim260b61d2016-03-15 14:54:12 -07003285
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 check_irq_off();
Christoph Lameter18bf8542014-08-06 16:04:11 -07003287 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003288 spin_lock(&n->list_lock);
3289 if (n->shared) {
3290 struct array_cache *shared_array = n->shared;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003291 int max = shared_array->limit - shared_array->avail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292 if (max) {
3293 if (batchcount > max)
3294 batchcount = max;
Christoph Lametere498be72005-09-09 13:03:32 -07003295 memcpy(&(shared_array->entry[shared_array->avail]),
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003296 ac->entry, sizeof(void *) * batchcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 shared_array->avail += batchcount;
3298 goto free_done;
3299 }
3300 }
3301
Joonsoo Kim97654df2014-08-06 16:04:25 -07003302 free_block(cachep, ac->entry, batchcount, node, &list);
Andrew Mortona737b3e2006-03-22 00:08:11 -08003303free_done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304#if STATS
3305 {
3306 int i = 0;
Geliang Tang73c02192016-01-14 15:17:59 -08003307 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308
Geliang Tang73c02192016-01-14 15:17:59 -08003309 list_for_each_entry(page, &n->slabs_free, lru) {
Joonsoo Kim8456a642013-10-24 10:07:49 +09003310 BUG_ON(page->active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311
3312 i++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 }
3314 STATS_SET_FREEABLE(cachep, i);
3315 }
3316#endif
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003317 spin_unlock(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003318 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 ac->avail -= batchcount;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003320 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321}
3322
3323/*
Andrew Mortona737b3e2006-03-22 00:08:11 -08003324 * Release an obj back to its cache. If the obj has a constructed state, it must
3325 * be in this state _before_ it is released. Called with disabled ints.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 */
Suleiman Souhlala947eb92011-06-02 00:16:42 -07003327static inline void __cache_free(struct kmem_cache *cachep, void *objp,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003328 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329{
Pekka Enberg9a2dba42006-02-01 03:05:49 -08003330 struct array_cache *ac = cpu_cache_get(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07003332 kasan_slab_free(cachep, objp);
3333
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 check_irq_off();
Catalin Marinasd5cff632009-06-11 13:22:40 +01003335 kmemleak_free_recursive(objp, cachep->flags);
Suleiman Souhlala947eb92011-06-02 00:16:42 -07003336 objp = cache_free_debugcheck(cachep, objp, caller);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003338 kmemcheck_slab_free(cachep, objp, cachep->object_size);
Pekka Enbergc175eea2008-05-09 20:35:53 +02003339
Siddha, Suresh B1807a1a2007-08-22 14:01:49 -07003340 /*
 * Skip calling cache_free_alien() when the platform is not NUMA.
 * This avoids the cache misses taken while accessing the slab
 * management data (a per-page memory reference) just to get the
 * nodeid.  Instead, test a global variable, which is most likely
 * already present in the cache.
3346 */
Mel Gormanb6e68bc2009-06-16 15:32:16 -07003347 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
Pekka Enberg729bd0b2006-06-23 02:03:05 -07003348 return;
Christoph Lametere498be72005-09-09 13:03:32 -07003349
Joonsoo Kim3d880192014-10-09 15:26:04 -07003350 if (ac->avail < ac->limit) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 STATS_INC_FREEHIT(cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 } else {
3353 STATS_INC_FREEMISS(cachep);
3354 cache_flusharray(cachep, ac);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 }
Zhao Jin42c8c992011-08-27 00:26:17 +08003356
Joonsoo Kimf68f8dd2016-03-15 14:54:56 -07003357 if (sk_memalloc_socks()) {
3358 struct page *page = virt_to_head_page(objp);
3359
3360 if (unlikely(PageSlabPfmemalloc(page))) {
3361 cache_free_pfmemalloc(cachep, page, objp);
3362 return;
3363 }
3364 }
3365
3366 ac->entry[ac->avail++] = objp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367}
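
/*
 * Illustrative trace of the fast path above (the numbers assume the
 * default tuning that enable_cpucache() below picks for small objects,
 * i.e. limit = 120 and batchcount = 60): frees are pushed LIFO into
 * ac->entry[] until ac->avail reaches ac->limit.  The next free then
 * calls cache_flusharray(), which moves up to batchcount objects into
 * the per-node shared array (falling back to free_block() when that
 * array is full), memmove()s the survivors down, and the object being
 * freed is stored at the new tail of the array.
 */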
3368
3369/**
3370 * kmem_cache_alloc - Allocate an object
3371 * @cachep: The cache to allocate from.
3372 * @flags: See kmalloc().
3373 *
3374 * Allocate an object from this cache. The flags are only relevant
3375 * if the cache has no available objects.
3376 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003377void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378{
Ezequiel Garcia48356302012-09-08 17:47:57 -03003379 void *ret = slab_alloc(cachep, flags, _RET_IP_);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003380
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07003381 kasan_slab_alloc(cachep, ret, flags);
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003382 trace_kmem_cache_alloc(_RET_IP_, ret,
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003383 cachep->object_size, cachep->size, flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003384
3385 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386}
3387EXPORT_SYMBOL(kmem_cache_alloc);
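
/*
 * Illustrative usage sketch (not part of the allocator; the struct and
 * cache names are invented for the example): a subsystem typically
 * creates its cache once and then allocates and frees objects from it.
 *
 *	struct foo {
 *		int a;
 *		struct list_head list;
 *	};
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (f) {
 *		...
 *		kmem_cache_free(foo_cache, f);
 *	}
 */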
3388
Jesper Dangaard Brouer7b0501d2016-03-15 14:53:53 -07003389static __always_inline void
3390cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3391 size_t size, void **p, unsigned long caller)
3392{
3393 size_t i;
3394
3395 for (i = 0; i < size; i++)
3396 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3397}
3398
Jesper Dangaard Brouer865762a2015-11-20 15:57:58 -08003399int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003400 void **p)
Christoph Lameter484748f2015-09-04 15:45:34 -07003401{
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003402 size_t i;
3403
3404 s = slab_pre_alloc_hook(s, flags);
3405 if (!s)
3406 return 0;
3407
3408 cache_alloc_debugcheck_before(s, flags);
3409
3410 local_irq_disable();
3411 for (i = 0; i < size; i++) {
3412 void *objp = __do_cache_alloc(s, flags);
3413
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003414 if (unlikely(!objp))
3415 goto error;
3416 p[i] = objp;
3417 }
3418 local_irq_enable();
3419
Jesper Dangaard Brouer7b0501d2016-03-15 14:53:53 -07003420 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3421
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003422 /* Clear memory outside IRQ disabled section */
3423 if (unlikely(flags & __GFP_ZERO))
3424 for (i = 0; i < size; i++)
3425 memset(p[i], 0, s->object_size);
3426
3427 slab_post_alloc_hook(s, flags, size, p);
3428 /* FIXME: Trace call missing. Christoph would like a bulk variant */
3429 return size;
3430error:
3431 local_irq_enable();
Jesper Dangaard Brouer7b0501d2016-03-15 14:53:53 -07003432 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
Jesper Dangaard Brouer2a777ea2016-03-15 14:53:50 -07003433 slab_post_alloc_hook(s, flags, i, p);
3434 __kmem_cache_free_bulk(s, i, p);
3435 return 0;
Christoph Lameter484748f2015-09-04 15:45:34 -07003436}
3437EXPORT_SYMBOL(kmem_cache_alloc_bulk);
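
/*
 * Illustrative sketch of the bulk API (cache name and count are invented):
 * several objects are grabbed in one call, under a single IRQ-disabled
 * section, and later released with the companion kmem_cache_free_bulk()
 * defined further down.  Note that this implementation is all-or-nothing:
 * the return value is either @size or 0.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs),
 *				   objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */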
3438
Li Zefan0f24f122009-12-11 15:45:30 +08003439#ifdef CONFIG_TRACING
Steven Rostedt85beb582010-11-24 16:23:34 -05003440void *
Ezequiel Garcia40521472012-09-08 17:47:56 -03003441kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003442{
Steven Rostedt85beb582010-11-24 16:23:34 -05003443 void *ret;
3444
Ezequiel Garcia48356302012-09-08 17:47:57 -03003445 ret = slab_alloc(cachep, flags, _RET_IP_);
Steven Rostedt85beb582010-11-24 16:23:34 -05003446
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07003447 kasan_kmalloc(cachep, ret, size, flags);
Steven Rostedt85beb582010-11-24 16:23:34 -05003448 trace_kmalloc(_RET_IP_, ret,
Ezequiel Garciaff4fcd02012-09-08 17:47:52 -03003449 size, cachep->size, flags);
Steven Rostedt85beb582010-11-24 16:23:34 -05003450 return ret;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003451}
Steven Rostedt85beb582010-11-24 16:23:34 -05003452EXPORT_SYMBOL(kmem_cache_alloc_trace);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003453#endif
3454
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455#ifdef CONFIG_NUMA
Zhouping Liud0d04b72013-05-16 11:36:23 +08003456/**
3457 * kmem_cache_alloc_node - Allocate an object on the specified node
3458 * @cachep: The cache to allocate from.
3459 * @flags: See kmalloc().
3460 * @nodeid: node number of the target node.
3461 *
 * Identical to kmem_cache_alloc() but it will allocate memory on the given
 * node, which can improve performance for CPU-bound structures.
 *
 * Falling back to other nodes is possible unless __GFP_THISNODE is set.
3466 */
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003467void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3468{
Ezequiel Garcia48356302012-09-08 17:47:57 -03003469 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003470
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07003471 kasan_slab_alloc(cachep, ret, flags);
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003472 trace_kmem_cache_alloc_node(_RET_IP_, ret,
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003473 cachep->object_size, cachep->size,
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003474 flags, nodeid);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003475
3476 return ret;
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003477}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478EXPORT_SYMBOL(kmem_cache_alloc_node);
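
/*
 * Illustrative sketch (hypothetical cache): pin an allocation to the
 * current cpu's memory node.  Passing __GFP_THISNODE would make the
 * allocation fail rather than fall back to another node.
 *
 *	int nid = numa_mem_id();
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 */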
3479
Li Zefan0f24f122009-12-11 15:45:30 +08003480#ifdef CONFIG_TRACING
Ezequiel Garcia40521472012-09-08 17:47:56 -03003481void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
Steven Rostedt85beb582010-11-24 16:23:34 -05003482 gfp_t flags,
Ezequiel Garcia40521472012-09-08 17:47:56 -03003483 int nodeid,
3484 size_t size)
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003485{
Steven Rostedt85beb582010-11-24 16:23:34 -05003486 void *ret;
3487
Ezequiel Garcia592f4142012-09-25 08:07:08 -03003488 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07003489
3490 kasan_kmalloc(cachep, ret, size, flags);
Steven Rostedt85beb582010-11-24 16:23:34 -05003491 trace_kmalloc_node(_RET_IP_, ret,
Ezequiel Garciaff4fcd02012-09-08 17:47:52 -03003492 size, cachep->size,
Steven Rostedt85beb582010-11-24 16:23:34 -05003493 flags, nodeid);
3494 return ret;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003495}
Steven Rostedt85beb582010-11-24 16:23:34 -05003496EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003497#endif
3498
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003499static __always_inline void *
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003500__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003501{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003502 struct kmem_cache *cachep;
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07003503 void *ret;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003504
Christoph Lameter2c59dd62013-01-10 19:14:19 +00003505 cachep = kmalloc_slab(size, flags);
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003506 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3507 return cachep;
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07003508 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07003509 kasan_kmalloc(cachep, ret, size, flags);
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07003510
3511 return ret;
Manfred Spraul97e2bde2005-05-01 08:58:38 -07003512}
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003513
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003514void *__kmalloc_node(size_t size, gfp_t flags, int node)
3515{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003516 return __do_kmalloc_node(size, flags, node, _RET_IP_);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003517}
Christoph Hellwigdbe5e692006-09-25 23:31:36 -07003518EXPORT_SYMBOL(__kmalloc_node);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003519
3520void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003521 int node, unsigned long caller)
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003522{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003523 return __do_kmalloc_node(size, flags, node, caller);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003524}
3525EXPORT_SYMBOL(__kmalloc_node_track_caller);
Christoph Hellwig8b98c162006-12-06 20:32:30 -08003526#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527
3528/**
Paul Drynoff800590f2006-06-23 02:03:48 -07003529 * __do_kmalloc - allocate memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 * @size: how many bytes of memory are required.
Paul Drynoff800590f2006-06-23 02:03:48 -07003531 * @flags: the type of memory to allocate (see kmalloc).
Randy Dunlap911851e2006-03-22 00:08:14 -08003532 * @caller: function caller for debug tracking of the caller
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 */
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003534static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003535 unsigned long caller)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003537 struct kmem_cache *cachep;
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003538 void *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539
Christoph Lameter2c59dd62013-01-10 19:14:19 +00003540 cachep = kmalloc_slab(size, flags);
Linus Torvaldsa5c96d82007-07-19 13:17:15 -07003541 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3542 return cachep;
Ezequiel Garcia48356302012-09-08 17:47:57 -03003543 ret = slab_alloc(cachep, flags, caller);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003544
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07003545 kasan_kmalloc(cachep, ret, size, flags);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003546 trace_kmalloc(caller, ret,
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003547 size, cachep->size, flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003548
3549 return ret;
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003550}
3551
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003552void *__kmalloc(size_t size, gfp_t flags)
3553{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003554 return __do_kmalloc(size, flags, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555}
3556EXPORT_SYMBOL(__kmalloc);
3557
Eduard - Gabriel Munteanuce71e272008-08-19 20:43:25 +03003558void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003559{
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003560 return __do_kmalloc(size, flags, caller);
Pekka Enberg7fd6b142006-02-01 03:05:52 -08003561}
3562EXPORT_SYMBOL(__kmalloc_track_caller);
Christoph Hellwig1d2c8ee2006-10-04 02:15:25 -07003563
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564/**
3565 * kmem_cache_free - Deallocate an object
3566 * @cachep: The cache the allocation was from.
3567 * @objp: The previously allocated object.
3568 *
3569 * Free an object which was previously allocated from this
3570 * cache.
3571 */
Pekka Enberg343e0d72006-02-01 03:05:50 -08003572void kmem_cache_free(struct kmem_cache *cachep, void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573{
3574 unsigned long flags;
Glauber Costab9ce5ef2012-12-18 14:22:46 -08003575 cachep = cache_from_obj(cachep, objp);
3576 if (!cachep)
3577 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578
3579 local_irq_save(flags);
Feng Tangd97d4762012-07-02 14:29:10 +08003580 debug_check_no_locks_freed(objp, cachep->object_size);
Thomas Gleixner3ac7fe52008-04-30 00:55:01 -07003581 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003582 debug_check_no_obj_freed(objp, cachep->object_size);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003583 __cache_free(cachep, objp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 local_irq_restore(flags);
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +03003585
Eduard - Gabriel Munteanuca2b84cb2009-03-23 15:12:24 +02003586 trace_kmem_cache_free(_RET_IP_, objp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587}
3588EXPORT_SYMBOL(kmem_cache_free);
3589
Jesper Dangaard Brouere6cdb582016-03-15 14:53:56 -07003590void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3591{
3592 struct kmem_cache *s;
3593 size_t i;
3594
3595 local_irq_disable();
3596 for (i = 0; i < size; i++) {
3597 void *objp = p[i];
3598
Jesper Dangaard Brouerca257192016-03-15 14:54:00 -07003599 if (!orig_s) /* called via kfree_bulk */
3600 s = virt_to_cache(objp);
3601 else
3602 s = cache_from_obj(orig_s, objp);
Jesper Dangaard Brouere6cdb582016-03-15 14:53:56 -07003603
3604 debug_check_no_locks_freed(objp, s->object_size);
3605 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3606 debug_check_no_obj_freed(objp, s->object_size);
3607
3608 __cache_free(s, objp, _RET_IP_);
3609 }
3610 local_irq_enable();
3611
3612 /* FIXME: add tracing */
3613}
3614EXPORT_SYMBOL(kmem_cache_free_bulk);
3615
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 * kfree - free previously allocated memory
3618 * @objp: pointer returned by kmalloc.
3619 *
Pekka Enberg80e93ef2005-09-09 13:10:16 -07003620 * If @objp is NULL, no operation is performed.
3621 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622 * Don't free memory not originally allocated by kmalloc()
3623 * or you will run into trouble.
3624 */
3625void kfree(const void *objp)
3626{
Pekka Enberg343e0d72006-02-01 03:05:50 -08003627 struct kmem_cache *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628 unsigned long flags;
3629
Pekka Enberg2121db72009-03-25 11:05:57 +02003630 trace_kfree(_RET_IP_, objp);
3631
Christoph Lameter6cb8f912007-07-17 04:03:22 -07003632 if (unlikely(ZERO_OR_NULL_PTR(objp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 return;
3634 local_irq_save(flags);
3635 kfree_debugcheck(objp);
Pekka Enberg6ed5eb2212006-02-01 03:05:49 -08003636 c = virt_to_cache(objp);
Christoph Lameter8c138bc2012-06-13 10:24:58 -05003637 debug_check_no_locks_freed(objp, c->object_size);
3638
3639 debug_check_no_obj_freed(objp, c->object_size);
Ezequiel Garcia7c0cb9c2012-09-08 17:47:55 -03003640 __cache_free(c, (void *)objp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 local_irq_restore(flags);
3642}
3643EXPORT_SYMBOL(kfree);
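
/*
 * Illustrative kmalloc()/kfree() round trip (the buffer size is
 * arbitrary): the allocation is served from the best-fitting kmalloc
 * cache, and kfree(NULL) is a no-op as documented above.
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */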
3644
Christoph Lametere498be72005-09-09 13:03:32 -07003645/*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003646 * This initializes kmem_cache_node or resizes various caches for all nodes.
Christoph Lametere498be72005-09-09 13:03:32 -07003647 */
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003648static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
Christoph Lametere498be72005-09-09 13:03:32 -07003649{
3650 int node;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003651 struct kmem_cache_node *n;
Christoph Lametercafeb022006-03-25 03:06:46 -08003652 struct array_cache *new_shared;
Joonsoo Kimc8522a32014-08-06 16:04:29 -07003653 struct alien_cache **new_alien = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003654
Mel Gorman9c09a952008-01-24 05:49:54 -08003655 for_each_online_node(node) {
Christoph Lametercafeb022006-03-25 03:06:46 -08003656
LQYMGTb455def2014-12-10 15:42:13 -08003657 if (use_alien_caches) {
3658 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3659 if (!new_alien)
3660 goto fail;
3661 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003662
Eric Dumazet63109842007-05-06 14:49:28 -07003663 new_shared = NULL;
3664 if (cachep->shared) {
3665 new_shared = alloc_arraycache(node,
Christoph Lameter0718dc22006-03-25 03:06:47 -08003666 cachep->shared*cachep->batchcount,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003667 0xbaadf00d, gfp);
Eric Dumazet63109842007-05-06 14:49:28 -07003668 if (!new_shared) {
3669 free_alien_cache(new_alien);
3670 goto fail;
3671 }
Christoph Lameter0718dc22006-03-25 03:06:47 -08003672 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003673
Christoph Lameter18bf8542014-08-06 16:04:11 -07003674 n = get_node(cachep, node);
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003675 if (n) {
3676 struct array_cache *shared = n->shared;
Joonsoo Kim97654df2014-08-06 16:04:25 -07003677 LIST_HEAD(list);
Christoph Lametercafeb022006-03-25 03:06:46 -08003678
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003679 spin_lock_irq(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07003680
Christoph Lametercafeb022006-03-25 03:06:46 -08003681 if (shared)
Christoph Lameter0718dc22006-03-25 03:06:47 -08003682 free_block(cachep, shared->entry,
Joonsoo Kim97654df2014-08-06 16:04:25 -07003683 shared->avail, node, &list);
Christoph Lametere498be72005-09-09 13:03:32 -07003684
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003685 n->shared = new_shared;
3686 if (!n->alien) {
3687 n->alien = new_alien;
Christoph Lametere498be72005-09-09 13:03:32 -07003688 new_alien = NULL;
3689 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003690 n->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003691 cachep->batchcount + cachep->num;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003692 spin_unlock_irq(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003693 slabs_destroy(cachep, &list);
Christoph Lametercafeb022006-03-25 03:06:46 -08003694 kfree(shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003695 free_alien_cache(new_alien);
3696 continue;
3697 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003698 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3699 if (!n) {
Christoph Lameter0718dc22006-03-25 03:06:47 -08003700 free_alien_cache(new_alien);
3701 kfree(new_shared);
Christoph Lametere498be72005-09-09 13:03:32 -07003702 goto fail;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003703 }
Christoph Lametere498be72005-09-09 13:03:32 -07003704
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003705 kmem_cache_node_init(n);
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003706 n->next_reap = jiffies + REAPTIMEOUT_NODE +
3707 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003708 n->shared = new_shared;
3709 n->alien = new_alien;
3710 n->free_limit = (1 + nr_cpus_node(node)) *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003711 cachep->batchcount + cachep->num;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003712 cachep->node[node] = n;
Christoph Lametere498be72005-09-09 13:03:32 -07003713 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003714 return 0;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003715
Andrew Mortona737b3e2006-03-22 00:08:11 -08003716fail:
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003717 if (!cachep->list.next) {
Christoph Lameter0718dc22006-03-25 03:06:47 -08003718 /* Cache is not active yet. Roll back what we did */
3719 node--;
3720 while (node >= 0) {
Christoph Lameter18bf8542014-08-06 16:04:11 -07003721 n = get_node(cachep, node);
3722 if (n) {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003723 kfree(n->shared);
3724 free_alien_cache(n->alien);
3725 kfree(n);
Christoph Lameter6a673682013-01-10 19:14:19 +00003726 cachep->node[node] = NULL;
Christoph Lameter0718dc22006-03-25 03:06:47 -08003727 }
3728 node--;
3729 }
3730 }
Christoph Lametercafeb022006-03-25 03:06:46 -08003731 return -ENOMEM;
Christoph Lametere498be72005-09-09 13:03:32 -07003732}
3733
Christoph Lameter18004c52012-07-06 15:25:12 -05003734/* Always called with the slab_mutex held */
Glauber Costa943a4512012-12-18 14:23:03 -08003735static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03003736 int batchcount, int shared, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737{
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003738 struct array_cache __percpu *cpu_cache, *prev;
3739 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003741 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3742 if (!cpu_cache)
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07003743 return -ENOMEM;
3744
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003745 prev = cachep->cpu_cache;
3746 cachep->cpu_cache = cpu_cache;
3747 kick_all_cpus_sync();
Christoph Lametere498be72005-09-09 13:03:32 -07003748
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 check_irq_on();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750 cachep->batchcount = batchcount;
3751 cachep->limit = limit;
Christoph Lametere498be72005-09-09 13:03:32 -07003752 cachep->shared = shared;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003754 if (!prev)
3755 goto alloc_node;
3756
3757 for_each_online_cpu(cpu) {
Joonsoo Kim97654df2014-08-06 16:04:25 -07003758 LIST_HEAD(list);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003759 int node;
3760 struct kmem_cache_node *n;
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003761 struct array_cache *ac = per_cpu_ptr(prev, cpu);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003762
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003763 node = cpu_to_mem(cpu);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003764 n = get_node(cachep, node);
3765 spin_lock_irq(&n->list_lock);
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003766 free_block(cachep, ac->entry, ac->avail, node, &list);
Christoph Lameter18bf8542014-08-06 16:04:11 -07003767 spin_unlock_irq(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003768 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 }
Joonsoo Kimbf0dea22014-10-09 15:26:27 -07003770 free_percpu(prev);
3771
3772alloc_node:
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003773 return alloc_kmem_cache_node(cachep, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774}
3775
Glauber Costa943a4512012-12-18 14:23:03 -08003776static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3777 int batchcount, int shared, gfp_t gfp)
3778{
3779 int ret;
Vladimir Davydov426589f2015-02-12 14:59:23 -08003780 struct kmem_cache *c;
Glauber Costa943a4512012-12-18 14:23:03 -08003781
3782 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3783
3784 if (slab_state < FULL)
3785 return ret;
3786
3787 if ((ret < 0) || !is_root_cache(cachep))
3788 return ret;
3789
Vladimir Davydov426589f2015-02-12 14:59:23 -08003790 lockdep_assert_held(&slab_mutex);
3791 for_each_memcg_cache(c, cachep) {
3792 /* return value determined by the root cache only */
3793 __do_tune_cpucache(c, limit, batchcount, shared, gfp);
Glauber Costa943a4512012-12-18 14:23:03 -08003794 }
3795
3796 return ret;
3797}
3798
Christoph Lameter18004c52012-07-06 15:25:12 -05003799/* Called with slab_mutex held always */
Pekka Enberg83b519e2009-06-10 19:40:04 +03003800static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801{
3802 int err;
Glauber Costa943a4512012-12-18 14:23:03 -08003803 int limit = 0;
3804 int shared = 0;
3805 int batchcount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806
Glauber Costa943a4512012-12-18 14:23:03 -08003807 if (!is_root_cache(cachep)) {
3808 struct kmem_cache *root = memcg_root_cache(cachep);
3809 limit = root->limit;
3810 shared = root->shared;
3811 batchcount = root->batchcount;
3812 }
3813
3814 if (limit && shared && batchcount)
3815 goto skip_setup;
Andrew Mortona737b3e2006-03-22 00:08:11 -08003816 /*
3817 * The head array serves three purposes:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818 * - create a LIFO ordering, i.e. return objects that are cache-warm
3819 * - reduce the number of spinlock operations.
Andrew Mortona737b3e2006-03-22 00:08:11 -08003820 * - reduce the number of linked list operations on the slab and
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 * bufctl chains: array operations are cheaper.
 * The numbers are guessed; we should auto-tune them as described by
 * Bonwick.
3824 */
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003825 if (cachep->size > 131072)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826 limit = 1;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003827 else if (cachep->size > PAGE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 limit = 8;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003829 else if (cachep->size > 1024)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 limit = 24;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003831 else if (cachep->size > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 limit = 54;
3833 else
3834 limit = 120;
3835
Andrew Mortona737b3e2006-03-22 00:08:11 -08003836 /*
3837 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 * allocation behaviour: Most allocs on one cpu, most free operations
 * on another cpu. For these cases, efficient object passing between
3840 * cpus is necessary. This is provided by a shared array. The array
3841 * replaces Bonwick's magazine layer.
3842 * On uniprocessor, it's functionally equivalent (but less efficient)
3843 * to a larger limit. Thus disabled by default.
3844 */
3845 shared = 0;
Christoph Lameter3b0efdf2012-06-13 10:24:57 -05003846 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847 shared = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848
3849#if DEBUG
Andrew Mortona737b3e2006-03-22 00:08:11 -08003850 /*
 * With debugging enabled, large batchcounts lead to excessively long
 * periods with local interrupts disabled. Limit the batchcount.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 */
3854 if (limit > 32)
3855 limit = 32;
3856#endif
Glauber Costa943a4512012-12-18 14:23:03 -08003857 batchcount = (limit + 1) / 2;
3858skip_setup:
3859 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 if (err)
Joe Perches11705322016-03-17 14:19:50 -07003861 pr_err("enable_cpucache failed for %s, error %d\n",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003862 cachep->name, -err);
Christoph Lameter2ed3a4e2006-09-25 23:31:38 -07003863 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864}
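
/*
 * Worked example of the sizing heuristic above (assuming 4K pages and
 * !DEBUG): a 192 byte object falls in the "<= 256" bucket, giving
 * limit = 120, batchcount = (120 + 1) / 2 = 60 and, on SMP, shared = 8.
 * A 4096 byte object is not greater than PAGE_SIZE, so it lands in the
 * "> 1024" bucket instead: limit = 24, batchcount = 12, shared = 8.
 */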
3865
Christoph Lameter1b552532006-03-22 00:09:07 -08003866/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node list_lock also protects the array_cache
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003869 * if drain_array() is used on the shared array.
Christoph Lameter1b552532006-03-22 00:09:07 -08003870 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003871static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
Christoph Lameter1b552532006-03-22 00:09:07 -08003872 struct array_cache *ac, int force, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873{
Joonsoo Kim97654df2014-08-06 16:04:25 -07003874 LIST_HEAD(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 int tofree;
3876
Christoph Lameter1b552532006-03-22 00:09:07 -08003877 if (!ac || !ac->avail)
3878 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 if (ac->touched && !force) {
3880 ac->touched = 0;
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003881 } else {
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003882 spin_lock_irq(&n->list_lock);
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003883 if (ac->avail) {
3884 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3885 if (tofree > ac->avail)
3886 tofree = (ac->avail + 1) / 2;
Joonsoo Kim97654df2014-08-06 16:04:25 -07003887 free_block(cachep, ac->entry, tofree, node, &list);
Christoph Lameterb18e7e62006-03-22 00:09:07 -08003888 ac->avail -= tofree;
3889 memmove(ac->entry, &(ac->entry[tofree]),
3890 sizeof(void *) * ac->avail);
3891 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003892 spin_unlock_irq(&n->list_lock);
Joonsoo Kim97654df2014-08-06 16:04:25 -07003893 slabs_destroy(cachep, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894 }
3895}
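
/*
 * Worked example of the partial drain above (assuming the array was not
 * touched since the last reap): with ac->limit == 120 the periodic
 * (force == 0) path tries to free (120 + 4) / 5 = 24 entries, but if only
 * 10 are available it frees (10 + 1) / 2 = 5 instead, so an array that is
 * in active use is never emptied completely by the reaper.
 */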
3896
3897/**
3898 * cache_reap - Reclaim memory from caches.
Randy Dunlap05fb6bf2007-02-28 20:12:13 -08003899 * @w: work descriptor
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900 *
3901 * Called from workqueue/eventd every few seconds.
3902 * Purpose:
3903 * - clear the per-cpu caches for this CPU.
3904 * - return freeable pages to the main free memory pool.
3905 *
Andrew Mortona737b3e2006-03-22 00:08:11 -08003906 * If we cannot acquire the cache chain mutex then just give up - we'll try
3907 * again on the next iteration.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908 */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08003909static void cache_reap(struct work_struct *w)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910{
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07003911 struct kmem_cache *searchp;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003912 struct kmem_cache_node *n;
Lee Schermerhorn7d6e6d02010-05-26 14:45:03 -07003913 int node = numa_mem_id();
Jean Delvarebf6aede2009-04-02 16:56:54 -07003914 struct delayed_work *work = to_delayed_work(w);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915
Christoph Lameter18004c52012-07-06 15:25:12 -05003916 if (!mutex_trylock(&slab_mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 /* Give up. Setup the next iteration. */
Christoph Lameter7c5cae32007-02-10 01:42:55 -08003918 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919
Christoph Lameter18004c52012-07-06 15:25:12 -05003920 list_for_each_entry(searchp, &slab_caches, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921 check_irq_on();
3922
Christoph Lameter35386e32006-03-22 00:09:05 -08003923 /*
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003924 * We only take the node lock if absolutely necessary and we
Christoph Lameter35386e32006-03-22 00:09:05 -08003925 * have established with reasonable certainty that
3926 * we can do some work if the lock was obtained.
3927 */
Christoph Lameter18bf8542014-08-06 16:04:11 -07003928 n = get_node(searchp, node);
Christoph Lameter35386e32006-03-22 00:09:05 -08003929
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003930 reap_alien(searchp, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003932 drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933
Christoph Lameter35386e32006-03-22 00:09:05 -08003934 /*
3935 * These are racy checks but it does not matter
3936 * if we skip one check or scan twice.
3937 */
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003938 if (time_after(n->next_reap, jiffies))
Christoph Lameter35386e32006-03-22 00:09:05 -08003939 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003941 n->next_reap = jiffies + REAPTIMEOUT_NODE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003943 drain_array(searchp, n, n->shared, 0, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003945 if (n->free_touched)
3946 n->free_touched = 0;
Christoph Lametered11d9e2006-06-30 01:55:45 -07003947 else {
3948 int freed;
3949
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003950 freed = drain_freelist(searchp, n, (n->free_limit +
Christoph Lametered11d9e2006-06-30 01:55:45 -07003951 5 * searchp->num - 1) / (5 * searchp->num));
3952 STATS_ADD_REAPED(searchp, freed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953 }
Christoph Lameter35386e32006-03-22 00:09:05 -08003954next:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955 cond_resched();
3956 }
3957 check_irq_on();
Christoph Lameter18004c52012-07-06 15:25:12 -05003958 mutex_unlock(&slab_mutex);
Christoph Lameter8fce4d82006-03-09 17:33:54 -08003959 next_reap_node();
Christoph Lameter7c5cae32007-02-10 01:42:55 -08003960out:
Andrew Mortona737b3e2006-03-22 00:08:11 -08003961 /* Set up the next iteration */
Jianyu Zhan5f0985b2014-03-30 17:02:20 +08003962 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963}
3964
Linus Torvalds158a9622008-01-02 13:04:48 -08003965#ifdef CONFIG_SLABINFO
Glauber Costa0d7561c2012-10-19 18:20:27 +04003966void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967{
Joonsoo Kim8456a642013-10-24 10:07:49 +09003968 struct page *page;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08003969 unsigned long active_objs;
3970 unsigned long num_objs;
3971 unsigned long active_slabs = 0;
3972 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
Christoph Lametere498be72005-09-09 13:03:32 -07003973 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 char *error = NULL;
Christoph Lametere498be72005-09-09 13:03:32 -07003975 int node;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003976 struct kmem_cache_node *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 active_objs = 0;
3979 num_slabs = 0;
Christoph Lameter18bf8542014-08-06 16:04:11 -07003980 for_each_kmem_cache_node(cachep, node, n) {
Christoph Lametere498be72005-09-09 13:03:32 -07003981
Ravikiran G Thirumalaica3b9b92006-02-04 23:27:58 -08003982 check_irq_on();
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00003983 spin_lock_irq(&n->list_lock);
Christoph Lametere498be72005-09-09 13:03:32 -07003984
Joonsoo Kim8456a642013-10-24 10:07:49 +09003985 list_for_each_entry(page, &n->slabs_full, lru) {
3986 if (page->active != cachep->num && !error)
Christoph Lametere498be72005-09-09 13:03:32 -07003987 error = "slabs_full accounting error";
3988 active_objs += cachep->num;
3989 active_slabs++;
3990 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09003991 list_for_each_entry(page, &n->slabs_partial, lru) {
3992 if (page->active == cachep->num && !error)
Joonsoo Kim106a74e2013-10-24 10:07:48 +09003993 error = "slabs_partial accounting error";
Joonsoo Kim8456a642013-10-24 10:07:49 +09003994 if (!page->active && !error)
Joonsoo Kim106a74e2013-10-24 10:07:48 +09003995 error = "slabs_partial accounting error";
Joonsoo Kim8456a642013-10-24 10:07:49 +09003996 active_objs += page->active;
Christoph Lametere498be72005-09-09 13:03:32 -07003997 active_slabs++;
3998 }
Joonsoo Kim8456a642013-10-24 10:07:49 +09003999 list_for_each_entry(page, &n->slabs_free, lru) {
4000 if (page->active && !error)
Joonsoo Kim106a74e2013-10-24 10:07:48 +09004001 error = "slabs_free accounting error";
Christoph Lametere498be72005-09-09 13:03:32 -07004002 num_slabs++;
4003 }
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00004004 free_objects += n->free_objects;
4005 if (n->shared)
4006 shared_avail += n->shared->avail;
Christoph Lametere498be72005-09-09 13:03:32 -07004007
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00004008 spin_unlock_irq(&n->list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 }
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004010 num_slabs += active_slabs;
4011 num_objs = num_slabs * cachep->num;
Christoph Lametere498be72005-09-09 13:03:32 -07004012 if (num_objs - active_objs != free_objects && !error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013 error = "free_objects accounting error";
4014
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004015 name = cachep->name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 if (error)
Joe Perches11705322016-03-17 14:19:50 -07004017 pr_err("slab: cache %s error: %s\n", name, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018
Glauber Costa0d7561c2012-10-19 18:20:27 +04004019 sinfo->active_objs = active_objs;
4020 sinfo->num_objs = num_objs;
4021 sinfo->active_slabs = active_slabs;
4022 sinfo->num_slabs = num_slabs;
4023 sinfo->shared_avail = shared_avail;
4024 sinfo->limit = cachep->limit;
4025 sinfo->batchcount = cachep->batchcount;
4026 sinfo->shared = cachep->shared;
4027 sinfo->objects_per_slab = cachep->num;
4028 sinfo->cache_order = cachep->gfporder;
4029}
4030
4031void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4032{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033#if STATS
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00004034 { /* node stats */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 unsigned long high = cachep->high_mark;
4036 unsigned long allocs = cachep->num_allocations;
4037 unsigned long grown = cachep->grown;
4038 unsigned long reaped = cachep->reaped;
4039 unsigned long errors = cachep->errors;
4040 unsigned long max_freeable = cachep->max_freeable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 unsigned long node_allocs = cachep->node_allocs;
Christoph Lametere498be72005-09-09 13:03:32 -07004042 unsigned long node_frees = cachep->node_frees;
Ravikiran G Thirumalaifb7faf32006-04-10 22:52:54 -07004043 unsigned long overflows = cachep->node_overflow;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044
Joe Perches756a025f02016-03-17 14:19:47 -07004045 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
Joe Perchese92dd4f2010-03-26 19:27:58 -07004046 allocs, high, grown,
4047 reaped, errors, max_freeable, node_allocs,
4048 node_frees, overflows);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 }
4050 /* cpu stats */
4051 {
4052 unsigned long allochit = atomic_read(&cachep->allochit);
4053 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4054 unsigned long freehit = atomic_read(&cachep->freehit);
4055 unsigned long freemiss = atomic_read(&cachep->freemiss);
4056
4057 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004058 allochit, allocmiss, freehit, freemiss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059 }
4060#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061}
4062
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063#define MAX_SLABINFO_WRITE 128
4064/**
4065 * slabinfo_write - Tuning for the slab allocator
4066 * @file: unused
4067 * @buffer: user buffer
4068 * @count: data length
4069 * @ppos: unused
4070 */
Glauber Costab7454ad2012-10-19 18:20:25 +04004071ssize_t slabinfo_write(struct file *file, const char __user *buffer,
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004072 size_t count, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073{
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004074 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 int limit, batchcount, shared, res;
Christoph Hellwig7a7c3812006-06-23 02:03:17 -07004076 struct kmem_cache *cachep;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004077
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 if (count > MAX_SLABINFO_WRITE)
4079 return -EINVAL;
4080 if (copy_from_user(&kbuf, buffer, count))
4081 return -EFAULT;
Pekka Enbergb28a02d2006-01-08 01:00:37 -08004082 kbuf[MAX_SLABINFO_WRITE] = '\0';
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083
4084 tmp = strchr(kbuf, ' ');
4085 if (!tmp)
4086 return -EINVAL;
4087 *tmp = '\0';
4088 tmp++;
4089 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4090 return -EINVAL;
4091
4092 /* Find the cache in the chain of caches. */
Christoph Lameter18004c52012-07-06 15:25:12 -05004093 mutex_lock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094 res = -EINVAL;
Christoph Lameter18004c52012-07-06 15:25:12 -05004095 list_for_each_entry(cachep, &slab_caches, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 if (!strcmp(cachep->name, kbuf)) {
Andrew Mortona737b3e2006-03-22 00:08:11 -08004097 if (limit < 1 || batchcount < 1 ||
4098 batchcount > limit || shared < 0) {
Christoph Lametere498be72005-09-09 13:03:32 -07004099 res = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 } else {
Christoph Lametere498be72005-09-09 13:03:32 -07004101 res = do_tune_cpucache(cachep, limit,
Pekka Enberg83b519e2009-06-10 19:40:04 +03004102 batchcount, shared,
4103 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 }
4105 break;
4106 }
4107 }
Christoph Lameter18004c52012-07-06 15:25:12 -05004108 mutex_unlock(&slab_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 if (res >= 0)
4110 res = count;
4111 return res;
4112}
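
/*
 * The handler above is driven from user space by writing a cache name
 * followed by three integers to /proc/slabinfo; the values below are only
 * an illustration:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * which ends up calling do_tune_cpucache(cachep, 120, 60, 8, GFP_KERNEL)
 * for the matching cache.
 */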
Al Viro871751e2006-03-25 03:06:39 -08004113
4114#ifdef CONFIG_DEBUG_SLAB_LEAK
4115
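/*
 * The leak report below collects callers into a flat array of unsigned
 * longs: n[0] is the capacity in (address, count) pairs, n[1] the number
 * of pairs currently stored, and the pairs themselves follow from n[2]
 * onwards, kept sorted by address so add_caller() can binary-search and
 * insert in place.
 */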
Al Viro871751e2006-03-25 03:06:39 -08004116static inline int add_caller(unsigned long *n, unsigned long v)
4117{
4118 unsigned long *p;
4119 int l;
4120 if (!v)
4121 return 1;
4122 l = n[1];
4123 p = n + 2;
4124 while (l) {
4125 int i = l/2;
4126 unsigned long *q = p + 2 * i;
4127 if (*q == v) {
4128 q[1]++;
4129 return 1;
4130 }
4131 if (*q > v) {
4132 l = i;
4133 } else {
4134 p = q + 2;
4135 l -= i + 1;
4136 }
4137 }
4138 if (++n[1] == n[0])
4139 return 0;
4140 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4141 p[0] = v;
4142 p[1] = 1;
4143 return 1;
4144}
4145
Joonsoo Kim8456a642013-10-24 10:07:49 +09004146static void handle_slab(unsigned long *n, struct kmem_cache *c,
4147 struct page *page)
Al Viro871751e2006-03-25 03:06:39 -08004148{
4149 void *p;
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004150 int i, j;
4151 unsigned long v;
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09004152
Al Viro871751e2006-03-25 03:06:39 -08004153 if (n[0] == n[1])
4154 return;
Joonsoo Kim8456a642013-10-24 10:07:49 +09004155 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004156 bool active = true;
4157
4158 for (j = page->active; j < c->num; j++) {
4159 if (get_free_obj(page, j) == i) {
4160 active = false;
4161 break;
4162 }
4163 }
4164
4165 if (!active)
Al Viro871751e2006-03-25 03:06:39 -08004166 continue;
Joonsoo Kimb1cb0982013-10-24 10:07:45 +09004167
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004168 /*
 * probe_kernel_read() is used because, with DEBUG_PAGEALLOC,
 * the page table mapping is only established when the object
 * is actually allocated, so we could otherwise fault on an
 * unmapped object still sitting in the cpu cache.
4173 */
4174 if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4175 continue;
4176
4177 if (!add_caller(n, v))
Al Viro871751e2006-03-25 03:06:39 -08004178 return;
4179 }
4180}
4181
4182static void show_symbol(struct seq_file *m, unsigned long address)
4183{
4184#ifdef CONFIG_KALLSYMS
Al Viro871751e2006-03-25 03:06:39 -08004185 unsigned long offset, size;
Tejun Heo9281ace2007-07-17 04:03:51 -07004186 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
Al Viro871751e2006-03-25 03:06:39 -08004187
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004188 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
Al Viro871751e2006-03-25 03:06:39 -08004189 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
Alexey Dobriyana5c43da2007-05-08 00:28:47 -07004190 if (modname[0])
Al Viro871751e2006-03-25 03:06:39 -08004191 seq_printf(m, " [%s]", modname);
4192 return;
4193 }
4194#endif
4195 seq_printf(m, "%p", (void *)address);
4196}
4197
4198static int leaks_show(struct seq_file *m, void *p)
4199{
Thierry Reding0672aa72012-06-22 19:42:49 +02004200 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
Joonsoo Kim8456a642013-10-24 10:07:49 +09004201 struct page *page;
Christoph Lameterce8eb6c2013-01-10 19:14:19 +00004202 struct kmem_cache_node *n;
Al Viro871751e2006-03-25 03:06:39 -08004203 const char *name;
Christoph Lameterdb845062013-02-05 18:45:23 +00004204 unsigned long *x = m->private;
Al Viro871751e2006-03-25 03:06:39 -08004205 int node;
4206 int i;
4207
4208 if (!(cachep->flags & SLAB_STORE_USER))
4209 return 0;
4210 if (!(cachep->flags & SLAB_RED_ZONE))
4211 return 0;
4212
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004213 /*
 * Set store_user_clean and start to grab stored user information
 * for all objects on this cache. If an alloc/free request comes in
 * during the processing, the information would be stale, so restart
 * the whole scan.
4218 */
4219 do {
4220 set_store_user_clean(cachep);
4221 drain_cpu_caches(cachep);
Al Viro871751e2006-03-25 03:06:39 -08004222
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004223 x[1] = 0;
Al Viro871751e2006-03-25 03:06:39 -08004224
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004225 for_each_kmem_cache_node(cachep, node, n) {
Al Viro871751e2006-03-25 03:06:39 -08004226
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004227 check_irq_on();
4228 spin_lock_irq(&n->list_lock);
Al Viro871751e2006-03-25 03:06:39 -08004229
Joonsoo Kimd31676d2016-03-15 14:54:24 -07004230 list_for_each_entry(page, &n->slabs_full, lru)
4231 handle_slab(x, cachep, page);
4232 list_for_each_entry(page, &n->slabs_partial, lru)
4233 handle_slab(x, cachep, page);
4234 spin_unlock_irq(&n->list_lock);
4235 }
4236 } while (!is_store_user_clean(cachep));
4237
Al Viro871751e2006-03-25 03:06:39 -08004238 name = cachep->name;
Christoph Lameterdb845062013-02-05 18:45:23 +00004239 if (x[0] == x[1]) {
Al Viro871751e2006-03-25 03:06:39 -08004240 /* Increase the buffer size */
Christoph Lameter18004c52012-07-06 15:25:12 -05004241 mutex_unlock(&slab_mutex);
Christoph Lameterdb845062013-02-05 18:45:23 +00004242 m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
Al Viro871751e2006-03-25 03:06:39 -08004243 if (!m->private) {
4244 /* Too bad, we are really out */
Christoph Lameterdb845062013-02-05 18:45:23 +00004245 m->private = x;
Christoph Lameter18004c52012-07-06 15:25:12 -05004246 mutex_lock(&slab_mutex);
Al Viro871751e2006-03-25 03:06:39 -08004247 return -ENOMEM;
4248 }
Christoph Lameterdb845062013-02-05 18:45:23 +00004249 *(unsigned long *)m->private = x[0] * 2;
4250 kfree(x);
Christoph Lameter18004c52012-07-06 15:25:12 -05004251 mutex_lock(&slab_mutex);
Al Viro871751e2006-03-25 03:06:39 -08004252 /* Now make sure this entry will be retried */
4253 m->count = m->size;
4254 return 0;
4255 }
Christoph Lameterdb845062013-02-05 18:45:23 +00004256 for (i = 0; i < x[1]; i++) {
4257 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4258 show_symbol(m, x[2*i+2]);
Al Viro871751e2006-03-25 03:06:39 -08004259 seq_putc(m, '\n');
4260 }
Siddha, Suresh Bd2e7b7d2006-09-25 23:31:47 -07004261
Al Viro871751e2006-03-25 03:06:39 -08004262 return 0;
4263}
4264
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004265static const struct seq_operations slabstats_op = {
Vladimir Davydov1df3b262014-12-10 15:42:16 -08004266 .start = slab_start,
Wanpeng Li276a2432013-07-08 08:08:28 +08004267 .next = slab_next,
4268 .stop = slab_stop,
Al Viro871751e2006-03-25 03:06:39 -08004269 .show = leaks_show,
4270};
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004271
4272static int slabstats_open(struct inode *inode, struct file *file)
4273{
Rob Jonesb208ce32014-10-09 15:28:03 -07004274 unsigned long *n;
4275
4276 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4277 if (!n)
4278 return -ENOMEM;
4279
4280 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4281
4282 return 0;
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004283}
4284
4285static const struct file_operations proc_slabstats_operations = {
4286 .open = slabstats_open,
4287 .read = seq_read,
4288 .llseek = seq_lseek,
4289 .release = seq_release_private,
4290};
Al Viro871751e2006-03-25 03:06:39 -08004291#endif
Alexey Dobriyana0ec95a2008-10-06 00:59:10 +04004292
4293static int __init slab_proc_init(void)
4294{
4295#ifdef CONFIG_DEBUG_SLAB_LEAK
4296 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4297#endif
4298 return 0;
4299}
4300module_init(slab_proc_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301#endif
4302
Manfred Spraul00e145b2005-09-03 15:55:07 -07004303/**
4304 * ksize - get the actual amount of memory allocated for a given object
4305 * @objp: Pointer to the object
4306 *
4307 * kmalloc may internally round up allocations and return more memory
4308 * than requested. ksize() can be used to determine the actual amount of
4309 * memory allocated. The caller may use this additional memory, even though
4310 * a smaller amount of memory was initially specified with the kmalloc call.
4311 * The caller must guarantee that objp points to a valid object previously
4312 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4313 * must not be freed during the duration of the call.
4314 */
Pekka Enbergfd76bab2007-05-06 14:48:40 -07004315size_t ksize(const void *objp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316{
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07004317 size_t size;
4318
Christoph Lameteref8b4522007-10-16 01:24:46 -07004319 BUG_ON(!objp);
4320 if (unlikely(objp == ZERO_SIZE_PTR))
Manfred Spraul00e145b2005-09-03 15:55:07 -07004321 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07004323 size = virt_to_cache(objp)->object_size;
4324 /* We assume that ksize callers could use the whole allocated area,
4325 * so we need to unpoison this area.
4326 */
Alexander Potapenko505f5dc2016-03-25 14:22:02 -07004327 kasan_krealloc(objp, size, GFP_NOWAIT);
Alexander Potapenko7ed2f9e2016-03-25 14:21:59 -07004328
4329 return size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330}
Kirill A. Shutemovb1aabec2009-02-10 15:21:44 +02004331EXPORT_SYMBOL(ksize);
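
/*
 * Illustrative use of ksize() (the reported size depends on the kmalloc
 * cache geometry; on a typical configuration a 100 byte request is
 * served from the 128 byte kmalloc cache, so ksize() returns 128 and the
 * caller may use all of it):
 *
 *	char *p = kmalloc(100, GFP_KERNEL);
 *	size_t usable = p ? ksize(p) : 0;
 *	...
 *	kfree(p);
 */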