bcache: Kill op->replace

This is prep work for converting bch_btree_insert() to
bch_btree_map_leaf_nodes(): the state it currently receives through
struct btree_op fields (op->type and op->replace) has to become
explicit function arguments, so the replace key is now threaded down
the insert path as a struct bkey *replace_key, with NULL meaning a
plain insert. It's a bunch of churn, but it should be straightforward.
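
Roughly, the calling convention changes like this (a sketch of a
hypothetical caller, not a hunk from this patch; the "before" names
are the struct btree_op fields this series removes):

	/* before: replace intent carried in struct btree_op */
	op->type = BTREE_REPLACE;
	bkey_copy(&op->replace, replace_key);
	bch_btree_insert(op, c, keys, journal_ref);

	/* after: replace_key is an explicit argument; NULL means a
	 * plain insert */
	bch_btree_insert(op, c, keys, journal_ref, replace_key);

One detail worth noting: on a failed replace, op->insert_collision is
now set by btree_insert_key() rather than by fix_overlapping_extents().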

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 3e0c901..7a1d8dc 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -89,15 +89,6 @@
  * Test module load/unload
  */
 
-static const char * const op_types[] = {
-	"insert", "replace"
-};
-
-static const char *op_type(struct btree_op *op)
-{
-	return op_types[op->type];
-}
-
 enum {
 	BTREE_INSERT_STATUS_INSERT,
 	BTREE_INSERT_STATUS_BACK_MERGE,
@@ -1699,10 +1690,9 @@
 	bch_bset_fix_lookup_table(b, where);
 }
 
-static bool fix_overlapping_extents(struct btree *b,
-				    struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 				    struct btree_iter *iter,
-				    struct btree_op *op)
+				    struct bkey *replace_key)
 {
 	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
 	{
@@ -1730,39 +1720,38 @@
 		 * We might overlap with 0 size extents; we can't skip these
 		 * because if they're in the set we're inserting to we have to
 		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for BTREE_REPLACE
+		 * inserting. But we don't want to check them for replace
 		 * operations.
 		 */
 
-		if (op->type == BTREE_REPLACE &&
-		    KEY_SIZE(k)) {
+		if (replace_key && KEY_SIZE(k)) {
 			/*
 			 * k might have been split since we inserted/found the
 			 * key we're replacing
 			 */
 			unsigned i;
 			uint64_t offset = KEY_START(k) -
-				KEY_START(&op->replace);
+				KEY_START(replace_key);
 
 			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(&op->replace) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+			if (KEY_START(k) < KEY_START(replace_key) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
 				goto check_failed;
 
 			/* We didn't find a key that we were supposed to */
 			if (KEY_START(k) > KEY_START(insert) + sectors_found)
 				goto check_failed;
 
-			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+			if (KEY_PTRS(replace_key) != KEY_PTRS(k))
 				goto check_failed;
 
 			/* skip past gen */
 			offset <<= 8;
 
-			BUG_ON(!KEY_PTRS(&op->replace));
+			BUG_ON(!KEY_PTRS(replace_key));
 
-			for (i = 0; i < KEY_PTRS(&op->replace); i++)
-				if (k->ptr[i] != op->replace.ptr[i] + offset)
+			for (i = 0; i < KEY_PTRS(replace_key); i++)
+				if (k->ptr[i] != replace_key->ptr[i] + offset)
 					goto check_failed;
 
 			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1833,9 +1822,8 @@
 	}
 
 check_failed:
-	if (op->type == BTREE_REPLACE) {
+	if (replace_key) {
 		if (!sectors_found) {
-			op->insert_collision = true;
 			return true;
 		} else if (sectors_found < KEY_SIZE(insert)) {
 			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1848,7 +1836,7 @@
 }
 
 static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k)
+			     struct bkey *k, struct bkey *replace_key)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct bkey *m, *prev;
@@ -1874,8 +1862,10 @@
 		prev = NULL;
 		m = bch_btree_iter_init(b, &iter, &search);
 
-		if (fix_overlapping_extents(b, k, &iter, op))
+		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+			op->insert_collision = true;
 			return false;
+		}
 
 		if (KEY_DIRTY(k))
 			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
@@ -1903,24 +1893,28 @@
 		if (m != end(i) &&
 		    bch_bkey_try_merge(b, k, m))
 			goto copy;
-	} else
+	} else {
+		BUG_ON(replace_key);
 		m = bch_bset_search(b, &b->sets[b->nsets], k);
+	}
 
 insert:	shift_keys(b, m, k);
 copy:	bkey_copy(m, k);
 merged:
-	bch_check_keys(b, "%u for %s", status, op_type(op));
+	bch_check_keys(b, "%u for %s", status,
+		       replace_key ? "replace" : "insert");
 
 	if (b->level && !KEY_OFFSET(k))
 		btree_current_write(b)->prio_blocked++;
 
-	trace_bcache_btree_insert_key(b, k, op->type, status);
+	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
 
 	return true;
 }
 
 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
-				  struct keylist *insert_keys)
+				  struct keylist *insert_keys,
+				  struct bkey *replace_key)
 {
 	bool ret = false;
 	unsigned oldsize = bch_count_data(b);
@@ -1936,11 +1930,11 @@
 		if (bkey_cmp(k, &b->key) <= 0) {
 			bkey_put(b->c, k, b->level);
 
-			ret |= btree_insert_key(b, op, k);
+			ret |= btree_insert_key(b, op, k, replace_key);
 			bch_keylist_pop_front(insert_keys);
 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 #if 0
-			if (op->type == BTREE_REPLACE) {
+			if (replace_key) {
 				bkey_put(b->c, k, b->level);
 				bch_keylist_pop_front(insert_keys);
 				op->insert_collision = true;
@@ -1953,7 +1947,7 @@
 			bch_cut_back(&b->key, &temp.key);
 			bch_cut_front(&b->key, insert_keys->keys);
 
-			ret |= btree_insert_key(b, op, &temp.key);
+			ret |= btree_insert_key(b, op, &temp.key, replace_key);
 			break;
 		} else {
 			break;
@@ -1968,7 +1962,8 @@
 
 static int btree_split(struct btree *b, struct btree_op *op,
 		       struct keylist *insert_keys,
-		       struct keylist *parent_keys)
+		       struct keylist *parent_keys,
+		       struct bkey *replace_key)
 {
 	bool split;
 	struct btree *n1, *n2 = NULL, *n3 = NULL;
@@ -1998,7 +1993,7 @@
 				goto err_free2;
 		}
 
-		bch_btree_insert_keys(n1, op, insert_keys);
+		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
 
 		/*
 		 * Has to be a linear search because we don't have an auxiliary
@@ -2026,7 +2021,7 @@
 	} else {
 		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
 
-		bch_btree_insert_keys(n1, op, insert_keys);
+		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
 	}
 
 	bch_keylist_add(parent_keys, &n1->key);
@@ -2036,7 +2031,7 @@
 		/* Depth increases, make a new root */
 
 		bkey_copy_key(&n3->key, &MAX_KEY);
-		bch_btree_insert_keys(n3, op, parent_keys);
+		bch_btree_insert_keys(n3, op, parent_keys, NULL);
 		bch_btree_node_write(n3, &cl);
 
 		closure_sync(&cl);
@@ -2091,7 +2086,8 @@
 
 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 				 struct keylist *insert_keys,
-				 atomic_t *journal_ref)
+				 atomic_t *journal_ref,
+				 struct bkey *replace_key)
 {
 	int ret = 0;
 	struct keylist split_keys;
@@ -2101,6 +2097,8 @@
 	BUG_ON(b->level);
 
 	do {
+		BUG_ON(b->level && replace_key);
+
 		if (should_split(b)) {
 			if (current->bio_list) {
 				op->lock = b->c->root->level + 1;
@@ -2112,8 +2110,9 @@
 				struct btree *parent = b->parent;
 
 				ret = btree_split(b, op, insert_keys,
-						  &split_keys);
+						  &split_keys, replace_key);
 				insert_keys = &split_keys;
+				replace_key = NULL;
 				b = parent;
 				if (!ret)
 					ret = -EINTR;
@@ -2121,7 +2120,8 @@
 		} else {
 			BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
-			if (bch_btree_insert_keys(b, op, insert_keys)) {
+			if (bch_btree_insert_keys(b, op, insert_keys,
+						  replace_key)) {
 				if (!b->level) {
 					bch_btree_leaf_dirty(b, journal_ref);
 				} else {
@@ -2165,9 +2165,7 @@
 
 	bch_keylist_add(&insert, check_key);
 
-	BUG_ON(op->type != BTREE_INSERT);
-
-	ret = bch_btree_insert_node(b, op, &insert, NULL);
+	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
 
 	BUG_ON(!ret && !bch_keylist_empty(&insert));
 out:
@@ -2177,7 +2175,8 @@
 }
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *keys, atomic_t *journal_ref)
+				    struct keylist *keys, atomic_t *journal_ref,
+				    struct bkey *replace_key)
 {
 	if (bch_keylist_empty(keys))
 		return 0;
@@ -2194,14 +2193,17 @@
 			return -EIO;
 		}
 
-		return btree(insert_recurse, k, b, op, keys, journal_ref);
+		return btree(insert_recurse, k, b, op, keys,
+			     journal_ref, replace_key);
 	} else {
-		return bch_btree_insert_node(b, op, keys, journal_ref);
+		return bch_btree_insert_node(b, op, keys,
+					     journal_ref, replace_key);
 	}
 }
 
 int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-		     struct keylist *keys, atomic_t *journal_ref)
+		     struct keylist *keys, atomic_t *journal_ref,
+		     struct bkey *replace_key)
 {
 	int ret = 0;
 
@@ -2209,7 +2211,8 @@
 
 	while (!bch_keylist_empty(keys)) {
 		op->lock = 0;
-		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
+		ret = btree_root(insert_recurse, c, op, keys,
+				 journal_ref, replace_key);
 
 		if (ret == -EAGAIN) {
 			BUG();
@@ -2217,8 +2220,7 @@
 		} else if (ret) {
 			struct bkey *k;
 
-			pr_err("error %i trying to insert key for %s",
-			       ret, op_type(op));
+			pr_err("error %i", ret);
 
 			while ((k = bch_keylist_pop(keys)))
 				bkey_put(c, k, 0);