[PATCH] add kmalloc_node, inline cleanup

This patch makes the following functions available for allocating memory
on a specific node, without changing the basic operation of the slab
allocator:

 kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node);
 kmalloc_node(size_t size, int flags, int node);

in a similar way to the existing node-blind functions:

 kmem_cache_alloc(kmem_cache_t *cachep, unsigned int flags);
 kmalloc(size_t size, unsigned int flags);
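
For example, a structure that will mostly be accessed from CPUs on a given
node can be allocated there (a minimal sketch; my_struct and node are just
placeholders, not part of this patch):

	struct my_struct *p;

	p = kmalloc_node(sizeof(*p), GFP_KERNEL, node);
	if (!p)
		return -ENOMEM;
	/* ... use p from CPUs on that node ... */
	kfree(p);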

kmem_cache_alloc_node was changed to pass flags and the node information
through the existing layers of the slab allocator (which led to some minor
rearrangements).  The functions at the lowest layer (kmem_getpages,
cache_grow) are already node-aware.  __alloc_percpu can now call
kmalloc_node as well.
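
As an illustration of the __alloc_percpu change, a caller that previously
had to pair kmem_find_general_cachep() with the old two-argument
kmem_cache_alloc_node() can now do the allocation in a single call (sketch
only; ptr, size and cpu are placeholders):

	/* before */
	ptr = kmem_cache_alloc_node(kmem_find_general_cachep(size, GFP_KERNEL),
				cpu_to_node(cpu));

	/* after */
	ptr = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));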

Performance measurements (using the pageset localization patch) yield:

w/o patches:
Tasks    jobs/min  jti  jobs/min/task      real       cpu
    1      484.27  100       484.2736     12.02      1.97   Wed Mar 30 20:50:43 2005
  100    25170.83   91       251.7083     23.12    150.10   Wed Mar 30 20:51:06 2005
  200    34601.66   84       173.0083     33.64    294.14   Wed Mar 30 20:51:40 2005
  300    37154.47   86       123.8482     46.99    436.56   Wed Mar 30 20:52:28 2005
  400    39839.82   80        99.5995     58.43    580.46   Wed Mar 30 20:53:27 2005
  500    40036.32   79        80.0726     72.68    728.60   Wed Mar 30 20:54:40 2005
  600    44074.21   79        73.4570     79.23    872.10   Wed Mar 30 20:55:59 2005
  700    44016.60   78        62.8809     92.56   1015.84   Wed Mar 30 20:57:32 2005
  800    40411.05   80        50.5138    115.22   1161.13   Wed Mar 30 20:59:28 2005
  900    42298.56   79        46.9984    123.83   1303.42   Wed Mar 30 21:01:33 2005
 1000    40955.05   80        40.9551    142.11   1441.92   Wed Mar 30 21:03:55 2005

with pageset localization and slab API patches:
Tasks    jobs/min  jti  jobs/min/task      real       cpu
    1      484.19  100       484.1930     12.02      1.98   Wed Mar 30 21:10:18 2005
  100    27428.25   92       274.2825     21.22    149.79   Wed Mar 30 21:10:40 2005
  200    37228.94   86       186.1447     31.27    293.49   Wed Mar 30 21:11:12 2005
  300    41725.42   85       139.0847     41.84    434.10   Wed Mar 30 21:11:54 2005
  400    43032.22   82       107.5805     54.10    582.06   Wed Mar 30 21:12:48 2005
  500    42211.23   83        84.4225     68.94    722.61   Wed Mar 30 21:13:58 2005
  600    40084.49   82        66.8075     87.12    873.11   Wed Mar 30 21:15:25 2005
  700    44169.30   79        63.0990     92.24   1008.77   Wed Mar 30 21:16:58 2005
  800    43097.94   79        53.8724    108.03   1155.88   Wed Mar 30 21:18:47 2005
  900    41846.75   79        46.4964    125.17   1303.38   Wed Mar 30 21:20:52 2005
 1000    40247.85   79        40.2478    144.60   1442.21   Wed Mar 30 21:23:17 2005

Signed-off-by: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3e3c3ab..7d66385 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -62,16 +62,9 @@
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node)
-{
-	return kmem_cache_alloc(cachep, GFP_KERNEL);
-}
-#endif
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -109,6 +102,20 @@
 extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmalloc_node(size_t size, int flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline void *kmalloc_node(size_t size, int flags, int node)
+{
+	return kmalloc(size, flags);
+}
+#endif
+
 extern int FASTCALL(kmem_cache_reap(int));
 extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 
diff --git a/mm/slab.c b/mm/slab.c
index ec660d8..771cc09 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -583,7 +583,7 @@
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -607,6 +607,12 @@
 	return csizep->cs_cachep;
 }
 
+kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+{
+	return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 		 int flags, size_t *left_over, unsigned int *num)
@@ -672,14 +678,11 @@
 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	if (cpu != -1) {
-		kmem_cache_t *cachep;
-		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
-		if (cachep)
-			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
-	}
-	if (!nc)
+	if (cpu == -1)
 		nc = kmalloc(memsize, GFP_KERNEL);
+	else
+		nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
+
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -2361,7 +2364,7 @@
  * and can sleep. And it will allocate memory on the given node, which
  * can improve the performance for cpu bound structures.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
 {
 	int loop;
 	void *objp;
@@ -2393,7 +2396,7 @@
 		spin_unlock_irq(&cachep->spinlock);
 
 		local_irq_disable();
-		if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+		if (!cache_grow(cachep, flags, nodeid)) {
 			local_irq_enable();
 			return NULL;
 		}
@@ -2435,6 +2438,16 @@
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmalloc_node(size_t size, int flags, int node)
+{
+	kmem_cache_t *cachep;
+
+	cachep = kmem_find_general_cachep(size, flags);
+	if (unlikely(cachep == NULL))
+		return NULL;
+	return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
 #endif
 
 /**
@@ -2462,7 +2475,12 @@
 {
 	kmem_cache_t *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	/* If you want to save a few bytes .text space: replace
+	 * __ with kmem_.
+	 * Then kmalloc uses the uninlined functions instead of the inline
+	 * functions.
+	 */
+	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
 	return __cache_alloc(cachep, flags);
@@ -2489,9 +2507,8 @@
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_possible(i))
 			continue;
-		pdata->ptrs[i] = kmem_cache_alloc_node(
-				kmem_find_general_cachep(size, GFP_KERNEL),
-				cpu_to_node(i));
+		pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+						cpu_to_node(i));
 
 		if (!pdata->ptrs[i])
 			goto unwind_oom;