/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

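/*
 * Common state shared by all slab allocator implementations: slab_caches is
 * the list of all caches in the system, protected by slab_mutex; slab_state
 * tracks how far slab bootstrap has progressed; kmem_cache is the cache from
 * which struct kmem_cache itself is allocated.
 */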
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
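
/*
 * Illustrative usage sketch (the "foo" names below are made up for this
 * example and do not exist elsewhere in the tree).  A typical caller sets up
 * a cache once, allocates objects from it, and destroys it on teardown:
 *
 *	struct foo {
 *		int a;
 *		struct list_head list;
 *	};
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		struct foo *obj;
 *
 *		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					      SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *
 *		obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *		if (!obj) {
 *			kmem_cache_destroy(foo_cache);
 *			return -ENOMEM;
 *		}
 *		kmem_cache_free(foo_cache, obj);
 *		kmem_cache_destroy(foo_cache);
 *		return 0;
 *	}
 */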

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_locked;

	s = __kmem_cache_create(name, size, align, flags, ctor);
	if (!s)
		err = -ENOSYS; /* Until __kmem_cache_create returns code */

	/*
	 * Check if the slab has actually been created and if it was a
	 * real instantiation.  Aliases do not belong on the list.
	 */
	if (s && s->refcount == 1)
		list_add(&s->list, &slab_caches);

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

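/*
 * kmem_cache_destroy - delete a cache
 * @s: the cache to destroy
 *
 * Drops a reference on the cache.  When the last reference is gone the cache
 * is removed from slab_caches and torn down; if objects are still allocated
 * from it, the shutdown is aborted and an error is logged instead.  Must be
 * called from a context that may sleep.
 */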
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			__kmem_cache_destroy(s);
		} else {
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
			       s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

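/*
 * slab_is_available - true once the slab allocator has been brought up far
 * enough during boot for kmalloc()/kmem_cache_alloc() to be usable.
 */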
int slab_is_available(void)
{
	return slab_state >= UP;
}