[XFS] Unwrap mru_lock.

Un-obfuscate mru_lock: remove the mutex_spinlock()/mutex_spinunlock()
wrapper macros, call spin_lock()/spin_unlock() directly, and drop the
extraneous unlock cookie held over from old xfs code.
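
For reference, a rough sketch of the kind of wrappers being removed (an
approximation, not copied verbatim from the old XFS compatibility
headers): mutex_spinlock() handed back a dummy "cookie" and
mutex_spinunlock() accepted it again, mirroring an older non-Linux
locking API, but on Linux both reduce to the plain spinlock primitives,
so callers can simply take and drop the lock directly as the hunks
below do:

	#include <linux/spinlock.h>

	/*
	 * Approximate shape of the removed wrappers (hypothetical
	 * reconstruction, not verbatim).  The cookie argument serves
	 * no purpose on Linux, which is why every call site collapses
	 * to spin_lock()/spin_unlock().
	 */
	#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
	#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); } while (0)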

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29745a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index e0b358c..dc64630 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -245,7 +245,7 @@
 		 */
 		list_move(&elem->list_node, &tmp);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 
@@ -259,7 +259,7 @@
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 	}
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 }
 
 /*
@@ -280,7 +280,7 @@
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	next = _xfs_mru_cache_migrate(mru, jiffies);
 	_xfs_mru_cache_clear_reap_list(mru);
 
@@ -294,7 +294,7 @@
 		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 int
@@ -398,17 +398,17 @@
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	if (mru->queued) {
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-		mutex_spinlock(&mru->lock);
+		spin_lock(&mru->lock);
 	}
 
 	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
 	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 void
@@ -454,13 +454,13 @@
 	elem->key = key;
 	elem->value = value;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 
 	radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
 	_xfs_mru_cache_list_insert(mru, elem);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	return 0;
 }
@@ -483,14 +483,14 @@
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
 	if (elem) {
 		value = elem->value;
 		list_del(&elem->list_node);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	if (elem)
 		kmem_zone_free(xfs_mru_elem_zone, elem);
@@ -540,14 +540,14 @@
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
 	}
 	else
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -571,10 +571,10 @@
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -588,5 +588,5 @@
 xfs_mru_cache_done(
 	xfs_mru_cache_t	*mru)
 {
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }