[PATCH] sem2mutex: mm/slab.c, mm/swapfile.c

Convert two semaphores that are used as mutexes over to the new mutex
API: mm/slab.c's cache_chain_sem becomes cache_chain_mutex, and
mm/swapfile.c's swapon_sem becomes swapon_mutex.

Since DEFINE_MUTEX() initializes the lock statically, the runtime
init_MUTEX() call in kmem_cache_init() is dropped.  The only other
non-mechanical change is in cache_reap(): down_trylock() returns
nonzero on failure, while mutex_trylock() returns nonzero on success,
so the test there is inverted.

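For reference, a minimal sketch of the conversion pattern applied in
both files (example_mutex, example_work and example_try are
hypothetical names for illustration, not symbols from this patch):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

	static void example_work(void)
	{
		mutex_lock(&example_mutex);	/* was: down(&example_sem); */
		/* ... critical section ... */
		mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
	}

	static void example_try(void)
	{
		/* Note the inverted polarity: down_trylock() returns
		 * nonzero on failure, mutex_trylock() returns nonzero
		 * on success -- hence the new '!' in cache_reap().
		 */
		if (!mutex_trylock(&example_mutex))	/* was: if (down_trylock(&example_sem)) */
			return;
		/* ... critical section ... */
		mutex_unlock(&example_mutex);
	}
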
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/mm/slab.c b/mm/slab.c
index 9374293..bd0317f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97.  Started multi-threading - markhe
- *	The global cache-chain is protected by the semaphore 'cache_chain_sem'.
- *	The sem is only needed when accessing/extending the cache-chain, which
+ *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ *	The mutex is only needed when accessing/extending the cache-chain, which
  *	can never happen inside an interrupt (kmem_cache_create(),
  *	kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,7 @@
 #include	<linux/rcupdate.h>
 #include	<linux/string.h>
 #include	<linux/nodemask.h>
+#include	<linux/mutex.h>
 
 #include	<asm/uaccess.h>
 #include	<asm/cacheflush.h>
@@ -631,7 +632,7 @@
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
@@ -857,7 +858,7 @@
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		/* we need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
 		 * kmalloc_node allows us to add the slab to the right
@@ -912,7 +913,7 @@
 				l3->shared = nc;
 			}
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
 		start_cpu_timer(cpu);
@@ -921,7 +922,7 @@
 	case CPU_DEAD:
 		/* fall thru */
 	case CPU_UP_CANCELED:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
@@ -973,13 +974,13 @@
 			spin_unlock_irq(&cachep->spinlock);
 			kfree(nc);
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 #endif
 	}
 	return NOTIFY_OK;
       bad:
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
 
@@ -1047,7 +1048,6 @@
 	 */
 
 	/* 1) create the cache_cache */
-	init_MUTEX(&cache_chain_sem);
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1168,10 @@
 	/* 6) resize the head arrays to their final sizes */
 	{
 		kmem_cache_t *cachep;
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
 		    enable_cpucache(cachep);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 	}
 
 	/* Done! */
@@ -1590,7 +1590,7 @@
 		BUG();
 	}
 
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1856,7 @@
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2044,18 @@
 	lock_cpu_hotplug();
 
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		unlock_cpu_hotplug();
 		return 1;
 	}
@@ -3314,7 +3314,7 @@
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3322,7 @@
 	struct list_head *walk;
 	struct kmem_list3 *l3;
 
-	if (down_trylock(&cache_chain_sem)) {
+	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
 		schedule_delayed_work(&__get_cpu_var(reap_work),
 				      REAPTIMEOUT_CPUC);
@@ -3393,7 +3393,7 @@
 		cond_resched();
 	}
 	check_irq_on();
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	drain_remote_pages();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3429,7 @@
 	loff_t n = *pos;
 	struct list_head *p;
 
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 	p = cache_chain.next;
@@ -3451,7 +3451,7 @@
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3603,7 @@
 		return -EINVAL;
 
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3620,7 @@
 			break;
 		}
 	}
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	if (res >= 0)
 		res = count;
 	return res;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 957fef4..f1e69c3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -25,6 +25,7 @@
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
+#include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
 
@@ -46,12 +47,12 @@
 
 struct swap_info_struct swap_info[MAX_SWAPFILES];
 
-static DECLARE_MUTEX(swapon_sem);
+static DEFINE_MUTEX(swapon_mutex);
 
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a semaphore.
+ * cannot be turned into a mutex.
  */
 static DECLARE_RWSEM(swap_unplug_sem);
 
@@ -1161,7 +1162,7 @@
 	up_write(&swap_unplug_sem);
 
 	destroy_swap_extents(p);
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	drain_mmlist();
 
@@ -1180,7 +1181,7 @@
 	p->swap_map = NULL;
 	p->flags = 0;
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
 	inode = mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
@@ -1209,7 +1210,7 @@
 	int i;
 	loff_t l = *pos;
 
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 
 	for (i = 0; i < nr_swapfiles; i++, ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
@@ -1238,7 +1239,7 @@
 
 static void swap_stop(struct seq_file *swap, void *v)
 {
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 }
 
 static int swap_show(struct seq_file *swap, void *v)
@@ -1540,7 +1541,7 @@
 		goto bad_swap;
 	}
 
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	p->flags = SWP_ACTIVE;
 	nr_swap_pages += nr_good_pages;
@@ -1566,7 +1567,7 @@
 		swap_info[prev].next = p - swap_info;
 	}
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	error = 0;
 	goto out;
 bad_swap: