/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

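/*
 * Tunables for the delayed-item machinery, as used later in this file: once
 * more than BTRFS_DELAYED_BACKGROUND items are pending an async flush is
 * kicked off, and above BTRFS_DELAYED_WRITEBACK the caller in
 * btrfs_balance_delayed_items() also waits for the backlog to shrink.
 * BTRFS_DELAYED_BATCH bounds one background run and sets the wakeup
 * granularity for throttled waiters.
 */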
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
75
David Sterbaf85b7372017-01-20 14:54:07 +010076static struct btrfs_delayed_node *btrfs_get_delayed_node(
77 struct btrfs_inode *btrfs_inode)
Miao Xie2f7e33d2011-06-23 07:27:13 +000078{
Miao Xie2f7e33d2011-06-23 07:27:13 +000079 struct btrfs_root *root = btrfs_inode->root;
Nikolay Borisov4a0cc7c2017-01-10 20:35:31 +020080 u64 ino = btrfs_ino(btrfs_inode);
Miao Xie2f7e33d2011-06-23 07:27:13 +000081 struct btrfs_delayed_node *node;
82
Seraphime Kirkovski20c7bce2016-12-15 14:38:16 +010083 node = READ_ONCE(btrfs_inode->delayed_node);
Miao Xie2f7e33d2011-06-23 07:27:13 +000084 if (node) {
Elena Reshetova6de5f182017-03-03 10:55:16 +020085 refcount_inc(&node->refs);
Miao Xie2f7e33d2011-06-23 07:27:13 +000086 return node;
87 }
88
89 spin_lock(&root->inode_lock);
90 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
Chris Masonec35e482017-12-15 11:58:27 -080091
Miao Xie2f7e33d2011-06-23 07:27:13 +000092 if (node) {
93 if (btrfs_inode->delayed_node) {
Elena Reshetova6de5f182017-03-03 10:55:16 +020094 refcount_inc(&node->refs); /* can be accessed */
Miao Xie2f7e33d2011-06-23 07:27:13 +000095 BUG_ON(btrfs_inode->delayed_node != node);
96 spin_unlock(&root->inode_lock);
97 return node;
98 }
Chris Masonec35e482017-12-15 11:58:27 -080099
100 /*
101 * It's possible that we're racing into the middle of removing
102 * this node from the radix tree. In this case, the refcount
103 * was zero and it should never go back to one. Just return
104 * NULL like it was never in the radix at all; our release
105 * function is in the process of removing it.
106 *
107 * Some implementations of refcount_inc refuse to bump the
108 * refcount once it has hit zero. If we don't do this dance
109 * here, refcount_inc() may decide to just WARN_ONCE() instead
110 * of actually bumping the refcount.
111 *
112 * If this node is properly in the radix, we want to bump the
113 * refcount twice, once for the inode and once for this get
114 * operation.
115 */
116 if (refcount_inc_not_zero(&node->refs)) {
117 refcount_inc(&node->refs);
118 btrfs_inode->delayed_node = node;
119 } else {
120 node = NULL;
121 }
122
Miao Xie2f7e33d2011-06-23 07:27:13 +0000123 spin_unlock(&root->inode_lock);
124 return node;
125 }
126 spin_unlock(&root->inode_lock);
127
128 return NULL;
129}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

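/*
 * Drop one reference on @delayed_node. While the node still has pending
 * items it is (re)queued on the global node list (and, if @mod is set, on
 * the prepare list as well); otherwise it is dequeued. When the last
 * reference goes away the node is removed from the radix tree and freed.
 */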
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

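/*
 * Link @ins into the insertion or deletion rb-tree of @delayed_node,
 * depending on @action, keeping the tree ordered by key. Returns -EEXIST if
 * an item with the same key is already queued. Callers in this file
 * serialize these tree updates on delayed_node->mutex.
 */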
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

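/*
 * Account one finished delayed item: bump the completion sequence, drop the
 * global pending-item count and wake up anybody throttled in
 * btrfs_balance_delayed_items() once enough progress has been made.
 */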
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

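/*
 * Metadata reservation for a single delayed item: the space is migrated from
 * the transaction's block reservation into fs_info->delayed_block_rsv, and
 * given back by btrfs_delayed_item_release_metadata() once the item has been
 * written out or dropped.
 */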
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in one batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, and the allocation might sleep,
	 * so set all locked nodes in the path to blocking locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper only handles simple insertions that do not need to extend the
 * item for new data, e.g. directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

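/*
 * The dirty inode item and the delayed iref deletion each count as one
 * pending item on the delayed node; the two helpers below clear the
 * corresponding flag, drop the node's item count and report the completed
 * item to the delayed root.
 */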
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is only used for an inode that has a single
	 * link, so there is exactly one iref; the case where several irefs
	 * share the same item cannot happen here.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

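/*
 * Asynchronous flushing: btrfs_wq_run_delayed_node() packages the delayed
 * root and a target count into a btrfs_async_delayed_work and queues it on
 * fs_info->delayed_workers, where btrfs_async_run_delayed_root() commits
 * prepared delayed nodes until the backlog is small enough.
 */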
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

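/*
 * A throttled caller may stop waiting once either BTRFS_DELAYED_BATCH items
 * have completed since it sampled the sequence counter (or the counter
 * wrapped), or the backlog has dropped below BTRFS_DELAYED_BACKGROUND.
 */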
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

Jeff Mahoney79787ea2012-03-12 16:03:00 +01001411/* Will return 0 or -ENOMEM */
Miao Xie16cdcec2011-04-22 18:12:22 +08001412int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001413 struct btrfs_fs_info *fs_info,
1414 const char *name, int name_len,
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001415 struct btrfs_inode *dir,
Miao Xie16cdcec2011-04-22 18:12:22 +08001416 struct btrfs_disk_key *disk_key, u8 type,
1417 u64 index)
1418{
1419 struct btrfs_delayed_node *delayed_node;
1420 struct btrfs_delayed_item *delayed_item;
1421 struct btrfs_dir_item *dir_item;
1422 int ret;
1423
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001424 delayed_node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001425 if (IS_ERR(delayed_node))
1426 return PTR_ERR(delayed_node);
1427
1428 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1429 if (!delayed_item) {
1430 ret = -ENOMEM;
1431 goto release_node;
1432 }
1433
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001434 delayed_item->key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001435 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001436 delayed_item->key.offset = index;
1437
1438 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1439 dir_item->location = *disk_key;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001440 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1441 btrfs_set_stack_dir_data_len(dir_item, 0);
1442 btrfs_set_stack_dir_name_len(dir_item, name_len);
1443 btrfs_set_stack_dir_type(dir_item, type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001444 memcpy((char *)(dir_item + 1), name, name_len);
1445
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001446 ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
Josef Bacik8c2a3ca2012-01-10 10:31:31 -05001447 /*
1448 * We reserved enough space when we started a new transaction, so it
1449 * is impossible for the metadata reservation here to fail.
1450 */
1451 BUG_ON(ret);
1452
Miao Xie16cdcec2011-04-22 18:12:22 +08001454 mutex_lock(&delayed_node->mutex);
1455 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1456 if (unlikely(ret)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001457 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001458 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1459 name_len, name, delayed_node->root->objectid,
1460 delayed_node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001461 BUG();
1462 }
1463 mutex_unlock(&delayed_node->mutex);
1464
1465release_node:
1466 btrfs_release_delayed_node(delayed_node);
1467 return ret;
1468}
1469
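/*
 * If the dir index item being deleted is still queued in the delayed node's
 * insertion tree (it was never written into the fs tree), drop it and its
 * metadata reservation here and return 0.  Return 1 when there is no
 * matching insertion item, in which case the caller must queue a real
 * deletion item instead.
 */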
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001470static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
Miao Xie16cdcec2011-04-22 18:12:22 +08001471 struct btrfs_delayed_node *node,
1472 struct btrfs_key *key)
1473{
1474 struct btrfs_delayed_item *item;
1475
1476 mutex_lock(&node->mutex);
1477 item = __btrfs_lookup_delayed_insertion_item(node, key);
1478 if (!item) {
1479 mutex_unlock(&node->mutex);
1480 return 1;
1481 }
1482
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001483 btrfs_delayed_item_release_metadata(fs_info, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001484 btrfs_release_delayed_item(item);
1485 mutex_unlock(&node->mutex);
1486 return 0;
1487}
1488
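/*
 * Queue the deletion of a dir index item.  If the item is still pending as a
 * delayed insertion it is simply cancelled; otherwise a zero-sized delayed
 * item keyed (dir ino, DIR_INDEX, index) is added to the node's deletion
 * tree and applied when the delayed node is run.
 */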
1489int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001490 struct btrfs_fs_info *fs_info,
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001491 struct btrfs_inode *dir, u64 index)
Miao Xie16cdcec2011-04-22 18:12:22 +08001492{
1493 struct btrfs_delayed_node *node;
1494 struct btrfs_delayed_item *item;
1495 struct btrfs_key item_key;
1496 int ret;
1497
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001498 node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001499 if (IS_ERR(node))
1500 return PTR_ERR(node);
1501
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001502 item_key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001503 item_key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001504 item_key.offset = index;
1505
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001506 ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
Miao Xie16cdcec2011-04-22 18:12:22 +08001507 if (!ret)
1508 goto end;
1509
1510 item = btrfs_alloc_delayed_item(0);
1511 if (!item) {
1512 ret = -ENOMEM;
1513 goto end;
1514 }
1515
1516 item->key = item_key;
1517
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001518 ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001519 /*
1520 * We reserved enough space when we started a new transaction, so it
1521 * is impossible for the metadata reservation here to fail.
1522 */
1523 BUG_ON(ret);
1524
1525 mutex_lock(&node->mutex);
1526 ret = __btrfs_add_delayed_deletion_item(node, item);
1527 if (unlikely(ret)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001528 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001529 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1530 index, node->root->objectid, node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001531 BUG();
1532 }
1533 mutex_unlock(&node->mutex);
1534end:
1535 btrfs_release_delayed_node(node);
1536 return ret;
1537}
1538
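/*
 * Copy the directory index count cached in the delayed node back into the
 * in-memory inode.  Returns -ENOENT if the inode has no delayed node and
 * -EINVAL if the delayed node does not have index_cnt set yet.
 */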
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001539int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001540{
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001541 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001542
1543 if (!delayed_node)
1544 return -ENOENT;
1545
1546 /*
1547 * Since we hold the i_mutex of this directory, no new directory index
1548 * can be added to the delayed node and index_cnt cannot be updated
1549 * right now, so we don't need to lock the delayed node.
1550 */
Miao Xie2f7e33d2011-06-23 07:27:13 +00001551 if (!delayed_node->index_cnt) {
1552 btrfs_release_delayed_node(delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001553 return -EINVAL;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001554 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001555
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001556 inode->index_cnt = delayed_node->index_cnt;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001557 btrfs_release_delayed_node(delayed_node);
1558 return 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001559}
1560
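/*
 * Collect the pending insertion and deletion items of the directory's
 * delayed node onto the caller's lists so readdir can merge them with the
 * on-disk index items.  Each item gets an extra reference, and the inode
 * lock is upgraded from shared to exclusive because only one readdir at a
 * time may use item->readdir_list.  Returns false if there is no delayed
 * node.
 */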
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001561bool btrfs_readdir_get_delayed_items(struct inode *inode,
1562 struct list_head *ins_list,
1563 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001564{
1565 struct btrfs_delayed_node *delayed_node;
1566 struct btrfs_delayed_item *item;
1567
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001568 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001569 if (!delayed_node)
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001570 return false;
1571
1572 /*
1573 * We can only do one readdir with delayed items at a time because of
1574 * item->readdir_list.
1575 */
1576 inode_unlock_shared(inode);
1577 inode_lock(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001578
1579 mutex_lock(&delayed_node->mutex);
1580 item = __btrfs_first_delayed_insertion_item(delayed_node);
1581 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001582 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001583 list_add_tail(&item->readdir_list, ins_list);
1584 item = __btrfs_next_delayed_item(item);
1585 }
1586
1587 item = __btrfs_first_delayed_deletion_item(delayed_node);
1588 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001589 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001590 list_add_tail(&item->readdir_list, del_list);
1591 item = __btrfs_next_delayed_item(item);
1592 }
1593 mutex_unlock(&delayed_node->mutex);
1594 /*
1595 * This delayed node is still cached in the btrfs inode, so refs
1596 * must be > 1 now, and we don't need to check whether it is about
1597 * to be freed.
1598 *
1599 * Besides that, this function is only used for readdir; we neither
1600 * insert nor delete delayed items during this period, so we don't
1601 * need to requeue or dequeue this delayed node either.
1602 */
Elena Reshetova6de5f182017-03-03 10:55:16 +02001603 refcount_dec(&delayed_node->refs);
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001604
1605 return true;
Miao Xie16cdcec2011-04-22 18:12:22 +08001606}
1607
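/*
 * Drop the item references taken by btrfs_readdir_get_delayed_items() and
 * downgrade the inode lock back to shared, since the VFS will release it
 * with up_read().
 */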
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001608void btrfs_readdir_put_delayed_items(struct inode *inode,
1609 struct list_head *ins_list,
1610 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001611{
1612 struct btrfs_delayed_item *curr, *next;
1613
1614 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1615 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001616 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001617 kfree(curr);
1618 }
1619
1620 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1621 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001622 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001623 kfree(curr);
1624 }
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001625
1626 /*
1627 * The VFS is going to do up_read(), so we need to downgrade back to a
1628 * read lock.
1629 */
1630 downgrade_write(&inode->i_rwsem);
Miao Xie16cdcec2011-04-22 18:12:22 +08001631}
1632
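/*
 * Return 1 if the given index is covered by a pending delayed deletion, in
 * which case readdir must skip the matching on-disk index item.  del_list is
 * sorted by key offset, so the walk can stop early.
 */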
1633int btrfs_should_delete_dir_index(struct list_head *del_list,
1634 u64 index)
1635{
Josef Bacike4fd4932018-01-23 15:17:05 -05001636 struct btrfs_delayed_item *curr;
1637 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001638
Josef Bacike4fd4932018-01-23 15:17:05 -05001639 list_for_each_entry(curr, del_list, readdir_list) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001640 if (curr->key.offset > index)
1641 break;
Josef Bacike4fd4932018-01-23 15:17:05 -05001642 if (curr->key.offset == index) {
1643 ret = 1;
1644 break;
1645 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001646 }
Josef Bacike4fd4932018-01-23 15:17:05 -05001647 return ret;
Miao Xie16cdcec2011-04-22 18:12:22 +08001648}
1649
1650/*
1651 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1653 */
Al Viro9cdda8d2013-05-22 16:48:09 -04001654int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
Jeff Mahoneyd2fbb2b2016-11-05 13:26:35 -04001655 struct list_head *ins_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001656{
1657 struct btrfs_dir_item *di;
1658 struct btrfs_delayed_item *curr, *next;
1659 struct btrfs_key location;
1660 char *name;
1661 int name_len;
1662 int over = 0;
1663 unsigned char d_type;
1664
1665 if (list_empty(ins_list))
1666 return 0;
1667
1668 /*
1669 * The data of a delayed item never changes, so we don't need to
1670 * lock the items. And since we hold the i_mutex of the directory,
1671 * nobody can delete any directory index right now.
1672 */
1673 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1674 list_del(&curr->readdir_list);
1675
Al Viro9cdda8d2013-05-22 16:48:09 -04001676 if (curr->key.offset < ctx->pos) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001677 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001678 kfree(curr);
1679 continue;
1680 }
1681
Al Viro9cdda8d2013-05-22 16:48:09 -04001682 ctx->pos = curr->key.offset;
Miao Xie16cdcec2011-04-22 18:12:22 +08001683
1684 di = (struct btrfs_dir_item *)curr->data;
1685 name = (char *)(di + 1);
Qu Wenruo3cae2102013-07-16 11:19:18 +08001686 name_len = btrfs_stack_dir_name_len(di);
Miao Xie16cdcec2011-04-22 18:12:22 +08001687
1688 d_type = btrfs_filetype_table[di->type];
1689 btrfs_disk_key_to_cpu(&location, &di->location);
1690
Al Viro9cdda8d2013-05-22 16:48:09 -04001691 over = !dir_emit(ctx, name, name_len,
Miao Xie16cdcec2011-04-22 18:12:22 +08001692 location.objectid, d_type);
1693
Elena Reshetova089e77e2017-03-03 10:55:17 +02001694 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001695 kfree(curr);
1696
1697 if (over)
1698 return 1;
Josef Bacik42e9cc42017-07-24 15:14:26 -04001699 ctx->pos++;
Miao Xie16cdcec2011-04-22 18:12:22 +08001700 }
1701 return 0;
1702}
1703
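/*
 * Copy the current in-memory inode state into the stack btrfs_inode_item
 * kept in the delayed node, so it can be written into the fs tree later
 * without touching the inode again.
 */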
Miao Xie16cdcec2011-04-22 18:12:22 +08001704static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1705 struct btrfs_inode_item *inode_item,
1706 struct inode *inode)
1707{
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001708 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1709 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001710 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1711 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1712 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1713 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1714 btrfs_set_stack_inode_generation(inode_item,
1715 BTRFS_I(inode)->generation);
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001716 btrfs_set_stack_inode_sequence(inode_item,
1717 inode_peek_iversion(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001718 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1719 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1720 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
Chris Masonff5714c2011-05-28 07:00:39 -04001721 btrfs_set_stack_inode_block_group(inode_item, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001722
David Sterbaa937b972014-12-12 17:39:12 +01001723 btrfs_set_stack_timespec_sec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001724 inode->i_atime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001725 btrfs_set_stack_timespec_nsec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001726 inode->i_atime.tv_nsec);
1727
David Sterbaa937b972014-12-12 17:39:12 +01001728 btrfs_set_stack_timespec_sec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001729 inode->i_mtime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001730 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001731 inode->i_mtime.tv_nsec);
1732
David Sterbaa937b972014-12-12 17:39:12 +01001733 btrfs_set_stack_timespec_sec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001734 inode->i_ctime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001735 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001736 inode->i_ctime.tv_nsec);
chandan r9cc97d62012-07-04 12:48:07 +05301737
1738 btrfs_set_stack_timespec_sec(&inode_item->otime,
1739 BTRFS_I(inode)->i_otime.tv_sec);
1740 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1741 BTRFS_I(inode)->i_otime.tv_nsec);
Miao Xie16cdcec2011-04-22 18:12:22 +08001742}
1743
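/*
 * The counterpart of fill_stack_inode_item(): populate the VFS inode (and
 * *rdev) from the inode_item cached in a dirty delayed node.  Returns
 * -ENOENT if the inode has no delayed node or its inode item is not dirty,
 * in which case the caller has to read the inode item from the fs tree.
 */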
Miao Xie2f7e33d2011-06-23 07:27:13 +00001744int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1745{
1746 struct btrfs_delayed_node *delayed_node;
1747 struct btrfs_inode_item *inode_item;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001748
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001749 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001750 if (!delayed_node)
1751 return -ENOENT;
1752
1753 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001754 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie2f7e33d2011-06-23 07:27:13 +00001755 mutex_unlock(&delayed_node->mutex);
1756 btrfs_release_delayed_node(delayed_node);
1757 return -ENOENT;
1758 }
1759
1760 inode_item = &delayed_node->inode_item;
1761
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001762 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1763 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
Nikolay Borisov6ef06d22017-02-20 13:50:34 +02001764 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001765 inode->i_mode = btrfs_stack_inode_mode(inode_item);
Miklos Szeredibfe86842011-10-28 14:13:29 +02001766 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001767 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1768 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
Yang Dongsheng6e17d302015-04-09 12:08:43 +08001769 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1770
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001771 inode_set_iversion_queried(inode,
1772 btrfs_stack_inode_sequence(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001773 inode->i_rdev = 0;
1774 *rdev = btrfs_stack_inode_rdev(inode_item);
1775 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1776
David Sterbaa937b972014-12-12 17:39:12 +01001777 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1778 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001779
David Sterbaa937b972014-12-12 17:39:12 +01001780 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1781 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001782
David Sterbaa937b972014-12-12 17:39:12 +01001783 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1784 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001785
chandan r9cc97d62012-07-04 12:48:07 +05301786 BTRFS_I(inode)->i_otime.tv_sec =
1787 btrfs_stack_timespec_sec(&inode_item->otime);
1788 BTRFS_I(inode)->i_otime.tv_nsec =
1789 btrfs_stack_timespec_nsec(&inode_item->otime);
1790
Miao Xie2f7e33d2011-06-23 07:27:13 +00001791 inode->i_generation = BTRFS_I(inode)->generation;
1792 BTRFS_I(inode)->index_cnt = (u64)-1;
1793
1794 mutex_unlock(&delayed_node->mutex);
1795 btrfs_release_delayed_node(delayed_node);
1796 return 0;
1797}
1798
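/*
 * Record an inode update in the delayed node instead of updating the inode
 * item in the fs tree right away.  The first update for a node reserves
 * metadata and marks it INODE_DIRTY; subsequent updates only refresh the
 * cached stack inode item.
 */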
Miao Xie16cdcec2011-04-22 18:12:22 +08001799int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1800 struct btrfs_root *root, struct inode *inode)
1801{
1802 struct btrfs_delayed_node *delayed_node;
David Sterbaaa0467d2011-06-03 16:29:08 +02001803 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001804
Nikolay Borisove5517a72017-01-10 20:35:33 +02001805 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001806 if (IS_ERR(delayed_node))
1807 return PTR_ERR(delayed_node);
1808
1809 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001810 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001811 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1812 goto release_node;
1813 }
1814
Nikolay Borisovfcabdd12017-01-10 20:35:34 +02001815 ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
Josef Bacik7fd2ae22011-11-08 15:47:34 -05001816 delayed_node);
Josef Bacikc06a0e12011-11-04 19:56:02 -04001817 if (ret)
1818 goto release_node;
Miao Xie16cdcec2011-04-22 18:12:22 +08001819
1820 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
Miao Xie7cf35d92013-12-26 13:07:05 +08001821 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
Miao Xie16cdcec2011-04-22 18:12:22 +08001822 delayed_node->count++;
1823 atomic_inc(&root->fs_info->delayed_root->items);
1824release_node:
1825 mutex_unlock(&delayed_node->mutex);
1826 btrfs_release_delayed_node(delayed_node);
1827 return ret;
1828}
1829
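/*
 * Queue the deletion of an inode's ref so it can be handled together with
 * the delayed inode item (only used when the inode has a single ref, see the
 * comment below on why no extra metadata space is reserved).  Returns
 * -EAGAIN during log recovery, where delayed inode updates are not allowed.
 */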
Nikolay Borisove07222c2017-01-10 20:35:37 +02001830int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
Miao Xie67de1172013-12-26 13:07:06 +08001831{
Nikolay Borisove07222c2017-01-10 20:35:37 +02001832 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
Miao Xie67de1172013-12-26 13:07:06 +08001833 struct btrfs_delayed_node *delayed_node;
1834
Chris Mason6f896052014-12-31 12:18:29 -05001835 /*
1836 * We don't do delayed inode updates during log recovery because it
1837 * leads to enospc problems. This means we also can't do delayed
1838 * inode refs.
1839 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001840 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
Chris Mason6f896052014-12-31 12:18:29 -05001841 return -EAGAIN;
1842
Nikolay Borisove07222c2017-01-10 20:35:37 +02001843 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie67de1172013-12-26 13:07:06 +08001844 if (IS_ERR(delayed_node))
1845 return PTR_ERR(delayed_node);
1846
1847 /*
1848 * We don't reserve space for inode ref deletion because:
1849 * - We ONLY do async inode ref deletion for an inode that has only
1850 * one link (i_nlink == 1), which means there is only one inode ref.
1851 * In most cases the inode ref and the inode item are in the same
1852 * leaf, and we will deal with them at the same time. Since we are
1853 * sure we will reserve space for the inode item, it is unnecessary
1854 * to reserve space for the inode ref deletion as well.
1855 * - If the inode ref and the inode item are not in the same leaf, we
1856 * still don't need to worry about enospc, because we reserve much
1857 * more space for the inode update than it needs.
1858 * - At worst, we can steal some space from the global reservation.
1859 * That is very rare.
1860 */
1861 mutex_lock(&delayed_node->mutex);
1862 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1863 goto release_node;
1864
1865 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1866 delayed_node->count++;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001867 atomic_inc(&fs_info->delayed_root->items);
Miao Xie67de1172013-12-26 13:07:06 +08001868release_node:
1869 mutex_unlock(&delayed_node->mutex);
1870 btrfs_release_delayed_node(delayed_node);
1871 return 0;
1872}
1873
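/*
 * Tear down a delayed node without applying it: release the metadata
 * reservations and drop all queued insertion items, deletion items, a
 * pending inode ref deletion and the dirty inode item.  Used by the
 * kill/destroy helpers below.
 */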
Miao Xie16cdcec2011-04-22 18:12:22 +08001874static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1875{
1876 struct btrfs_root *root = delayed_node->root;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001877 struct btrfs_fs_info *fs_info = root->fs_info;
Miao Xie16cdcec2011-04-22 18:12:22 +08001878 struct btrfs_delayed_item *curr_item, *prev_item;
1879
1880 mutex_lock(&delayed_node->mutex);
1881 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1882 while (curr_item) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001883 btrfs_delayed_item_release_metadata(fs_info, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001884 prev_item = curr_item;
1885 curr_item = __btrfs_next_delayed_item(prev_item);
1886 btrfs_release_delayed_item(prev_item);
1887 }
1888
1889 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1890 while (curr_item) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001891 btrfs_delayed_item_release_metadata(fs_info, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001892 prev_item = curr_item;
1893 curr_item = __btrfs_next_delayed_item(prev_item);
1894 btrfs_release_delayed_item(prev_item);
1895 }
1896
Miao Xie67de1172013-12-26 13:07:06 +08001897 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1898 btrfs_release_delayed_iref(delayed_node);
1899
Miao Xie7cf35d92013-12-26 13:07:05 +08001900 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001901 btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001902 btrfs_release_delayed_inode(delayed_node);
1903 }
1904 mutex_unlock(&delayed_node->mutex);
1905}
1906
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001907void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001908{
1909 struct btrfs_delayed_node *delayed_node;
1910
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001911 delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001912 if (!delayed_node)
1913 return;
1914
1915 __btrfs_kill_delayed_node(delayed_node);
1916 btrfs_release_delayed_node(delayed_node);
1917}
1918
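/*
 * Walk the root's radix tree of delayed nodes in batches and kill each one,
 * so that nothing delayed is left behind for this root.
 */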
1919void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1920{
1921 u64 inode_id = 0;
1922 struct btrfs_delayed_node *delayed_nodes[8];
1923 int i, n;
1924
1925 while (1) {
1926 spin_lock(&root->inode_lock);
1927 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1928 (void **)delayed_nodes, inode_id,
1929 ARRAY_SIZE(delayed_nodes));
1930 if (!n) {
1931 spin_unlock(&root->inode_lock);
1932 break;
1933 }
1934
1935 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1936
1937 for (i = 0; i < n; i++)
Elena Reshetova6de5f182017-03-03 10:55:16 +02001938 refcount_inc(&delayed_nodes[i]->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001939 spin_unlock(&root->inode_lock);
1940
1941 for (i = 0; i < n; i++) {
1942 __btrfs_kill_delayed_node(delayed_nodes[i]);
1943 btrfs_release_delayed_node(delayed_nodes[i]);
1944 }
1945 }
1946}
Miao Xie67cde342012-06-14 02:23:22 -06001947
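/*
 * Kill every delayed node still linked on the fs-wide delayed root's node
 * list, leaving nothing queued for later writeback.
 */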
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001948void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
Miao Xie67cde342012-06-14 02:23:22 -06001949{
Miao Xie67cde342012-06-14 02:23:22 -06001950 struct btrfs_delayed_node *curr_node, *prev_node;
1951
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001952 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
Miao Xie67cde342012-06-14 02:23:22 -06001953 while (curr_node) {
1954 __btrfs_kill_delayed_node(curr_node);
1955
1956 prev_node = curr_node;
1957 curr_node = btrfs_next_delayed_node(curr_node);
1958 btrfs_release_delayed_node(prev_node);
1959 }
1960}
1961