bcache: Don't bother with bucket refcount for btree node allocations

The bucket refcount (dropped with bkey_put()) is only needed to prevent
the newly allocated bucket from being garbage collected until we've
added a pointer to it somewhere. But for btree node allocations,
holding the btree node locks is enough to guard against races with
garbage collection.
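
Concretely, the node allocation path now takes the pin from
__bch_bucket_alloc_set() and drops it straight away with bkey_put(),
relying on the node locks instead. A condensed sketch of the new
sequence (drawn from the hunk around line 1062 below; bucket_lock
handling and the mca_alloc() retry/error paths are elided):

	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
		goto err;

	/*
	 * The allocator pinned the bucket, but the btree node locks we
	 * hold already keep GC from reusing it, so drop the pin now
	 * rather than carrying it until the pointer is inserted.
	 */
	bkey_put(c, &k.key);

	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, &k.key, level);

As a result, the error paths and the other btree node callers no
longer need a matching put, which is why their __bkey_put() calls go
away in the rest of the patch.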

Eventually the per-bucket refcount is going to be replaced with
something specific to bch_alloc_sectors().

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index fa4d0b1..7dff73b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -180,7 +180,7 @@
 
 /* Btree key manipulation */
 
-void __bkey_put(struct cache_set *c, struct bkey *k)
+void bkey_put(struct cache_set *c, struct bkey *k)
 {
 	unsigned i;
 
@@ -189,12 +189,6 @@
 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
 }
 
-static void bkey_put(struct cache_set *c, struct bkey *k, int level)
-{
-	if ((level && KEY_OFFSET(k)) || !level)
-		__bkey_put(c, k);
-}
-
 /* Btree IO */
 
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
@@ -1068,6 +1062,7 @@
 	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
 		goto err;
 
+	bkey_put(c, &k.key);
 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
 
 	b = mca_alloc(c, &k.key, level);
@@ -1077,7 +1072,6 @@
 	if (!b) {
 		cache_bug(c,
 			"Tried to allocate bucket that was in btree cache");
-		__bkey_put(c, &k.key);
 		goto retry;
 	}
 
@@ -1090,7 +1084,6 @@
 	return b;
 err_free:
 	bch_bucket_free(c, &k.key);
-	__bkey_put(c, &k.key);
 err:
 	mutex_unlock(&c->bucket_lock);
 
@@ -1217,7 +1210,6 @@
 
 	if (!IS_ERR_OR_NULL(n)) {
 		swap(b, n);
-		__bkey_put(b->c, &b->key);
 
 		memcpy(k->ptr, b->key.ptr,
 		       sizeof(uint64_t) * KEY_PTRS(&b->key));
@@ -1932,19 +1924,12 @@
 			break;
 
 		if (bkey_cmp(k, &b->key) <= 0) {
-			bkey_put(b->c, k, b->level);
+			if (!b->level)
+				bkey_put(b->c, k);
 
 			ret |= btree_insert_key(b, op, k, replace_key);
 			bch_keylist_pop_front(insert_keys);
 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
-#if 0
-			if (replace_key) {
-				bkey_put(b->c, k, b->level);
-				bch_keylist_pop_front(insert_keys);
-				op->insert_collision = true;
-				break;
-			}
-#endif
 			BKEY_PADDED(key) temp;
 			bkey_copy(&temp.key, insert_keys->keys);
 
@@ -2071,11 +2056,9 @@
 
 	return 0;
 err_free2:
-	__bkey_put(n2->c, &n2->key);
 	btree_node_free(n2);
 	rw_unlock(true, n2);
 err_free1:
-	__bkey_put(n1->c, &n1->key);
 	btree_node_free(n1);
 	rw_unlock(true, n1);
 err:
@@ -2225,7 +2208,7 @@
 		pr_err("error %i", ret);
 
 		while ((k = bch_keylist_pop(keys)))
-			bkey_put(c, k, 0);
+			bkey_put(c, k);
 	} else if (op.op.insert_collision)
 		ret = -ESRCH;
 
@@ -2251,7 +2234,6 @@
 	mutex_unlock(&b->c->bucket_lock);
 
 	b->c->root = b;
-	__bkey_put(b->c, &b->key);
 
 	bch_journal_meta(b->c, &cl);
 	closure_sync(&cl);