blob: 8cf8b4962d6c895ad28646978dc26525c8efd6cc [file] [log] [blame]
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

/*
 * Bootstrap state of the slab allocator.  Zero-initialized at boot;
 * the state values are defined in slab.h and compared with >= in
 * slab_is_available() below.
 */
enum slab_state slab_state;
/* List of all registered kmem caches; walked under slab_mutex. */
LIST_HEAD(slab_caches);
/* Serializes cache creation/teardown and protects slab_caches. */
DEFINE_MUTEX(slab_mutex);
Shuah Khan77be4b12012-08-16 00:09:46 -070026#ifdef CONFIG_DEBUG_VM
27static int kmem_cache_sanity_check(const char *name, size_t size)
28{
29 struct kmem_cache *s = NULL;
30
31 if (!name || in_interrupt() || size < sizeof(void *) ||
32 size > KMALLOC_MAX_SIZE) {
33 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
34 return -EINVAL;
35 }
36
37 list_for_each_entry(s, &slab_caches, list) {
38 char tmp;
39 int res;
40
41 /*
42 * This happens when the module gets unloaded and doesn't
43 * destroy its slab cache and no-one else reuses the vmalloc
44 * area of the module. Print a warning.
45 */
46 res = probe_kernel_address(s->name, tmp);
47 if (res) {
48 pr_err("Slab cache with size %d has lost its name\n",
49 s->object_size);
50 continue;
51 }
52
53 if (!strcmp(s->name, name)) {
54 pr_err("%s (%s): Cache name already exists.\n",
55 __func__, name);
56 dump_stack();
57 s = NULL;
58 return -EINVAL;
59 }
60 }
61
62 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
63 return 0;
64}
65#else
66static inline int kmem_cache_sanity_check(const char *name, size_t size)
67{
68 return 0;
69}
70#endif
71
/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;

	/*
	 * Hold off CPU hotplug while the cache is created, then take
	 * slab_mutex, which serializes cache creation/destruction and
	 * protects the slab_caches walk in kmem_cache_sanity_check().
	 * Lock order (cpu hotplug refcount, then slab_mutex) matters;
	 * released strictly in reverse below.
	 */
	get_online_cpus();
	mutex_lock(&slab_mutex);
	/* Only hand off to the allocator-specific creator if args are sane. */
	if (kmem_cache_sanity_check(name, size) == 0)
		s = __kmem_cache_create(name, size, align, flags, ctor);
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	/* SLAB_PANIC callers want creation failure to be fatal, not NULL. */
	if (!s && (flags & SLAB_PANIC))
		panic("kmem_cache_create: Failed to create slab '%s'\n", name);

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
Christoph Lameter97d06602012-07-06 15:25:11 -0500115
116int slab_is_available(void)
117{
118 return slab_state >= UP;
119}