switch the protection of percpu_counter list to spinlock

... making percpu_counter_destroy() non-blocking

The percpu_counters list is only touched for a list_add() at init
time, a list_del() in percpu_counter_destroy() and a short,
non-sleeping walk in the CPU hotplug callback, so a spinlock is
sufficient.  With the mutex gone, percpu_counter_destroy() can be
called from contexts that are not allowed to block.
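
A minimal sketch (not part of this patch; the struct and function
names are hypothetical) of the kind of caller this enables:
destroying a counter embedded in an object that is torn down under
a spinlock, i.e. with sleeping forbidden:

	#include <linux/percpu_counter.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct foo {				/* hypothetical object */
		struct percpu_counter events;
		struct list_head list;
	};

	static DEFINE_SPINLOCK(foo_lock);	/* protects the foo list */

	static void foo_teardown(struct foo *f)
	{
		spin_lock(&foo_lock);
		list_del(&f->list);
		/*
		 * percpu_counter_destroy() no longer takes a mutex, so
		 * calling it with foo_lock held is fine; with the old
		 * code this would have been a sleeping call under a
		 * spinlock.
		 */
		percpu_counter_destroy(&f->events);
		spin_unlock(&foo_lock);
		kfree(f);
	}

The same reasoning keeps the hotplug callback below correct: nothing
done under percpu_counters_lock sleeps.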

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index f8a3f1a..ba6085d 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -12,7 +12,7 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 static LIST_HEAD(percpu_counters);
-static DEFINE_MUTEX(percpu_counters_lock);
+static DEFINE_SPINLOCK(percpu_counters_lock);
 #endif
 
 #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
@@ -123,9 +123,9 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 	INIT_LIST_HEAD(&fbc->list);
-	mutex_lock(&percpu_counters_lock);
+	spin_lock(&percpu_counters_lock);
 	list_add(&fbc->list, &percpu_counters);
-	mutex_unlock(&percpu_counters_lock);
+	spin_unlock(&percpu_counters_lock);
 #endif
 	return 0;
 }
@@ -139,9 +139,9 @@
 	debug_percpu_counter_deactivate(fbc);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	mutex_lock(&percpu_counters_lock);
+	spin_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
-	mutex_unlock(&percpu_counters_lock);
+	spin_unlock(&percpu_counters_lock);
 #endif
 	free_percpu(fbc->counters);
 	fbc->counters = NULL;
@@ -170,7 +170,7 @@
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
-	mutex_lock(&percpu_counters_lock);
+	spin_lock(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
 		unsigned long flags;
@@ -181,7 +181,7 @@
 		*pcount = 0;
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
-	mutex_unlock(&percpu_counters_lock);
+	spin_unlock(&percpu_counters_lock);
 #endif
 	return NOTIFY_OK;
 }