mm/slab_common: commonize slab merge logic
Slab merge is a good feature for reducing fragmentation. Currently it is
only applied to SLUB, but it would be good to apply it to SLAB as well.
This patch is a preparation step for applying slab merge to SLAB by
commonizing the slab merge logic.
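
For context, a rough sketch of the common-side interface this prepares
for is shown below. The exact placement in mm/slab.h / mm/slab_common.c,
the "slab_nomerge" boot option, and the SLAB_NEVER_MERGE / SLAB_MERGE_SAME
masks are assumptions illustrating the direction; they are not hunks of
this patch.

	/* Sketch: declarations the allocators could share via mm/slab.h (assumed) */
	unsigned long kmem_cache_flags(unsigned long object_size,
		unsigned long flags, const char *name,
		void (*ctor)(void *));

	int slab_unmergeable(struct kmem_cache *s);

	struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));

	/* Sketch: allocator-independent pieces moving to mm/slab_common.c (assumed) */
	#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
			SLAB_FAILSLAB)

	#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
			SLAB_CACHE_DMA | SLAB_NOTRACK)

	/* a common "slab_nomerge" would replace the SLUB-only "slub_nomerge" */
	static int slab_nomerge;

	static int __init setup_slab_nomerge(char *str)
	{
		slab_nomerge = 1;
		return 1;
	}
	__setup("slab_nomerge", setup_slab_nomerge);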
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/slub.c b/mm/slub.c
index 1050d7d..ae7b9f1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -169,16 +169,6 @@
*/
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-/*
- * Set of flags that will prevent slab merging
- */
-#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
- SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
- SLAB_FAILSLAB)
-
-#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_CACHE_DMA | SLAB_NOTRACK)
-
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
@@ -1176,7 +1166,7 @@
__setup("slub_debug", setup_slub_debug);
-static unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
{
@@ -1208,7 +1198,7 @@
struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
{
@@ -2719,12 +2709,6 @@
static int slub_min_objects;
/*
- * Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
- */
-static int slub_nomerge;
-
-/*
* Calculate the order of allocation given an slab object size.
*
* The order of allocation has significant impact on performance and other
@@ -3252,14 +3236,6 @@
__setup("slub_min_objects=", setup_slub_min_objects);
-static int __init setup_slub_nomerge(char *str)
-{
- slub_nomerge = 1;
- return 1;
-}
-
-__setup("slub_nomerge", setup_slub_nomerge);
-
void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
@@ -3637,69 +3613,6 @@
{
}
-/*
- * Find a mergeable slab cache
- */
-static int slab_unmergeable(struct kmem_cache *s)
-{
- if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
- return 1;
-
- if (!is_root_cache(s))
- return 1;
-
- if (s->ctor)
- return 1;
-
- /*
- * We may have set a slab to be unmergeable during bootstrap.
- */
- if (s->refcount < 0)
- return 1;
-
- return 0;
-}
-
-static struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *))
-{
- struct kmem_cache *s;
-
- if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
- return NULL;
-
- if (ctor)
- return NULL;
-
- size = ALIGN(size, sizeof(void *));
- align = calculate_alignment(flags, align, size);
- size = ALIGN(size, align);
- flags = kmem_cache_flags(size, flags, name, NULL);
-
- list_for_each_entry(s, &slab_caches, list) {
- if (slab_unmergeable(s))
- continue;
-
- if (size > s->size)
- continue;
-
- if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
- continue;
- /*
- * Check if alignment is compatible.
- * Courtesy of Adrian Drzewiecki
- */
- if ((s->size & ~(align - 1)) != s->size)
- continue;
-
- if (s->size - size >= sizeof(void *))
- continue;
-
- return s;
- }
- return NULL;
-}
-
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))