add hlist_bl_lock/unlock helpers

Now that the whole dcache_hash_bucket crap is gone, go all the way and
also remove the weird layering violations for locking the hash buckets.
Add hlist_bl_lock/unlock helpers to move the locking into the list
abstraction instead of requiring each caller to open-code it.  After
all, allowing for the bit locks is the whole point of these helpers
over the plain hlist variant.
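
With the helpers in place, callers take the bucket bit lock through the
list abstraction rather than casting the head to an unsigned long
themselves.  A minimal sketch of the pattern, where example_del() and
its arguments are illustrative names rather than part of this patch:

	#include <linux/list_bl.h>

	/* Remove a node from its hash bucket under the bit lock. */
	static void example_del(struct hlist_bl_head *b,
				struct hlist_bl_node *n)
	{
		hlist_bl_lock(b);	/* bit_spin_lock on bit 0 of b->first */
		__hlist_bl_del(n);
		n->pprev = NULL;	/* mark the node unhashed, as dcache does */
		hlist_bl_unlock(b);
	}

This mirrors the first fs/dcache.c hunk below.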

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/fs/dcache.c b/fs/dcache.c
index d600a0a..22a0ef4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -109,16 +109,6 @@
 	return dentry_hashtable + (hash & D_HASHMASK);
 }
 
-static inline void spin_lock_bucket(struct hlist_bl_head *b)
-{
-	bit_spin_lock(0, (unsigned long *)&b->first);
-}
-
-static inline void spin_unlock_bucket(struct hlist_bl_head *b)
-{
-	__bit_spin_unlock(0, (unsigned long *)&b->first);
-}
-
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
@@ -334,10 +324,10 @@
 		else
 			b = d_hash(dentry->d_parent, dentry->d_name.hash);
 
-		spin_lock_bucket(b);
+		hlist_bl_lock(b);
 		__hlist_bl_del(&dentry->d_hash);
 		dentry->d_hash.pprev = NULL;
-		spin_unlock_bucket(b);
+		hlist_bl_unlock(b);
 
 		dentry_rcuwalk_barrier(dentry);
 	}
@@ -1594,9 +1584,9 @@
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	spin_lock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_lock(&tmp->d_sb->s_anon);
 	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-	spin_unlock_bucket(&tmp->d_sb->s_anon);
+	hlist_bl_unlock(&tmp->d_sb->s_anon);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
 	security_d_instantiate(tmp, inode);
@@ -2076,10 +2066,10 @@
 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
 	BUG_ON(!d_unhashed(entry));
-	spin_lock_bucket(b);
+	hlist_bl_lock(b);
 	entry->d_flags |= DCACHE_RCUACCESS;
 	hlist_bl_add_head_rcu(&entry->d_hash, b);
-	spin_unlock_bucket(b);
+	hlist_bl_unlock(b);
 }
 
 static void _d_rehash(struct dentry * entry)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f07643e..7a4fb63 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -93,14 +93,12 @@
 
 static inline void spin_lock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	bit_spin_lock(0, (unsigned long *)bl);
+	hlist_bl_lock(&gl_hash_table[hash]);
 }
 
 static inline void spin_unlock_bucket(unsigned int hash)
 {
-	struct hlist_bl_head *bl = &gl_hash_table[hash];
-	__bit_spin_unlock(0, (unsigned long *)bl);
+	hlist_bl_unlock(&gl_hash_table[hash]);
 }
 
 static void gfs2_glock_dealloc(struct rcu_head *rcu)
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 5bad17d1..31f9d75 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,6 +2,7 @@
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/bit_spinlock.h>
 
 /*
  * Special version of lists, where head of the list has a lock in the lowest
@@ -114,6 +115,16 @@
 	}
 }
 
+static inline void hlist_bl_lock(struct hlist_bl_head *b)
+{
+	bit_spin_lock(0, (unsigned long *)b);
+}
+
+static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+{
+	__bit_spin_unlock(0, (unsigned long *)b);
+}
+
 /**
  * hlist_bl_for_each_entry	- iterate over list of given type
  * @tpos:	the type * to use as a loop cursor.
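
The helpers also combine with the existing iterators: a lookup can hold
the bucket lock across hlist_bl_for_each_entry().  A sketch under the
assumption of a hypothetical example_obj type keyed by an unsigned long
(none of these names appear in the patch):

	struct example_obj {
		struct hlist_bl_node node;
		unsigned long key;
	};

	static struct example_obj *example_find(struct hlist_bl_head *b,
						unsigned long key)
	{
		struct example_obj *obj;
		struct hlist_bl_node *pos;

		hlist_bl_lock(b);
		hlist_bl_for_each_entry(obj, pos, b, node) {
			if (obj->key == key) {
				hlist_bl_unlock(b);
				return obj;	/* found under the bucket lock */
			}
		}
		hlist_bl_unlock(b);
		return NULL;
	}

Since the lock lives in bit 0 of the head pointer, hlist_bl_first()
masks it off, so walking the list while holding the lock is safe.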