Btrfs: Fix for lockdep warnings with alloc_mutex and pinned_mutex

This fixes the lockdep complaint by having a different mutex to guard caching the
block group, so you don't end up with this backwards dependency.  Thank you,

Signed-off-by: Josef Bacik <jbacik@redhat.com>

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0f2a9b5..166896d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -580,6 +580,7 @@
 	struct btrfs_block_group_item item;
 	spinlock_t lock;
 	struct mutex alloc_mutex;
+	struct mutex cache_mutex;
 	u64 pinned;
 	u64 reserved;
 	u64 flags;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b33e0bf..a970472 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -170,8 +170,8 @@
 			start = extent_end + 1;
 		} else if (extent_start > start && extent_start < end) {
 			size = extent_start - start;
-			ret = btrfs_add_free_space_lock(block_group, start,
-							size);
+			ret = btrfs_add_free_space(block_group, start,
+						   size);
 			BUG_ON(ret);
 			start = extent_end + 1;
 		} else {
@@ -181,7 +181,7 @@
 
 	if (start < end) {
 		size = end - start;
-		ret = btrfs_add_free_space_lock(block_group, start, size);
+		ret = btrfs_add_free_space(block_group, start, size);
 		BUG_ON(ret);
 	}
 	mutex_unlock(&info->pinned_mutex);
@@ -2842,17 +2842,19 @@
 		if (!block_group)
 			goto new_group_no_lock;
 
+		if (unlikely(!block_group->cached)) {
+			mutex_lock(&block_group->cache_mutex);
+			ret = cache_block_group(root, block_group);
+			mutex_unlock(&block_group->cache_mutex);
+			if (ret)
+				break;
+		}
+
 		mutex_lock(&block_group->alloc_mutex);
 		if (unlikely(!block_group_bits(block_group, data)))
 			goto new_group;
 
-		ret = cache_block_group(root, block_group);
-		if (ret) {
-			mutex_unlock(&block_group->alloc_mutex);
-			break;
-		}
-
-		if (block_group->ro)
+		if (unlikely(block_group->ro))
 			goto new_group;
 
 		free_space = btrfs_find_free_space(block_group, search_start,
@@ -3273,12 +3275,12 @@
 	struct btrfs_block_group_cache *block_group;
 
 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	mutex_lock(&block_group->alloc_mutex);
+	mutex_lock(&block_group->cache_mutex);
 	cache_block_group(root, block_group);
+	mutex_unlock(&block_group->cache_mutex);
 
-	ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
-					   ins->offset);
-	mutex_unlock(&block_group->alloc_mutex);
+	ret = btrfs_remove_free_space(block_group, ins->objectid,
+				      ins->offset);
 	BUG_ON(ret);
 	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
 					    ref_generation, owner, ins);
@@ -5801,6 +5803,7 @@
 
 		spin_lock_init(&cache->lock);
 		mutex_init(&cache->alloc_mutex);
+		mutex_init(&cache->cache_mutex);
 		INIT_LIST_HEAD(&cache->list);
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -5854,6 +5857,7 @@
 	cache->key.offset = size;
 	spin_lock_init(&cache->lock);
 	mutex_init(&cache->alloc_mutex);
+	mutex_init(&cache->cache_mutex);
 	INIT_LIST_HEAD(&cache->list);
 	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);