/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a pointer to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_locked;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_locked;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->object_size = s->size = size;
		s->align = align;
		s->ctor = ctor;
		s->name = kstrdup(name, GFP_KERNEL);
		if (!s->name) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}

		err = __kmem_cache_create(s, flags);
		if (!err) {
			s->refcount = 1;
			list_add(&s->list, &slab_caches);
		} else {
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
	} else
		err = -ENOMEM;

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
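
/*
 * Example usage (an illustrative sketch only, not part of this file's
 * API: the struct, cache pointer, and init function below are
 * hypothetical):
 *
 *	struct my_object {
 *		int id;
 *		struct list_head link;
 *	};
 *
 *	static struct kmem_cache *my_object_cache;
 *
 *	static int __init my_init(void)
 *	{
 *		my_object_cache = kmem_cache_create("my_object",
 *					sizeof(struct my_object), 0,
 *					SLAB_HWCACHE_ALIGN, NULL);
 *		return my_object_cache ? 0 : -ENOMEM;
 *	}
 *
 * Objects are then obtained with kmem_cache_alloc(my_object_cache,
 * GFP_KERNEL) and returned with kmem_cache_free(my_object_cache, obj).
 */
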
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
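
/*
 * Example teardown (an illustrative sketch; my_object_cache is the
 * hypothetical cache from the kmem_cache_create() example above):
 *
 *	static void __exit my_exit(void)
 *	{
 *		kmem_cache_destroy(my_object_cache);
 *	}
 *
 * Every object must have been returned to the cache first; otherwise
 * __kmem_cache_shutdown() fails and the cache is put back on
 * slab_caches with an error logged, as in the function above.
 */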

int slab_is_available(void)
{
	return slab_state >= UP;
}
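
/*
 * Example check (an illustrative sketch; "buf" and "size" are
 * hypothetical): code that can run before the slab allocator is
 * initialized may guard its allocations, e.g.:
 *
 *	if (slab_is_available())
 *		buf = kmalloc(size, GFP_KERNEL);
 *	else
 *		buf = alloc_bootmem(size);
 */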