/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

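/*
 * Look up the delayed node cached in the btrfs inode, falling back to the
 * per-root radix tree.  A reference is taken on the node that is returned;
 * callers must drop it with btrfs_release_delayed_node().
 */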
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		refcount_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

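/*
 * Return the first node on the node_list with an extra reference held, or
 * NULL if the list is empty.
 */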
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

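/*
 * Return the node that follows @node on the node_list, taking a reference
 * on it.  If @node has already been dequeued, start from the head of the
 * list instead.
 */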
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

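/*
 * Drop one reference on @delayed_node.  The node is re-queued if it still
 * has pending items, otherwise it is dequeued; the last reference removes
 * it from the radix tree and frees it.
 */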
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (refcount_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

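/*
 * Allocate a delayed item with @data_len bytes of trailing payload.  The
 * item starts with a single reference held by the caller.
 */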
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-tree of the delayed node to search in
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

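/*
 * Insert @ins into the insertion or deletion rb-tree of @delayed_node,
 * keyed by btrfs key.  Returns -EEXIST if an item with the same key is
 * already queued.
 */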
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

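/*
 * Account one completed delayed item: bump the processed-item sequence and
 * wake up waiters once a batch boundary is crossed or the backlog drops
 * below the background threshold.
 */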
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

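/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reserve into the fs-wide delayed_block_rsv, so the
 * space is still accounted for when the item is written out later.
 */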
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an
 * existing item for new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

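/*
 * Clear the INODE_DIRTY flag on @delayed_node and account the pending
 * inode update as finished.
 */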
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

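/*
 * Copy the in-memory inode item into the inode item in the fs tree and,
 * if a delayed iref deletion is pending, delete the inode ref/extref item
 * as well.  Called with delayed_node->mutex held.
 */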
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

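/*
 * Flush everything queued on one delayed node: run the pending insertions,
 * then the pending deletions, and finally write back the inode item.
 */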
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

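/*
 * Flush all delayed items of one inode within the current transaction.
 * Returns 0 immediately when the inode has no delayed node or no pending
 * items.
 */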
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

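/*
 * Commit just the dirty inode item of @inode, inside a transaction of its
 * own obtained via btrfs_join_transaction(); any queued insertion or
 * deletion items are left alone.
 */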
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

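/*
 * Worker callback: pull prepared delayed nodes off the prepare_list and
 * flush them, each inside its own joined transaction, until either the
 * requested number of nodes has been processed or the backlog drops below
 * half of BTRFS_DELAYED_BACKGROUND.
 */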
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

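/*
 * Throttle callers when too many delayed items have accumulated: kick the
 * async worker, and once the backlog reaches BTRFS_DELAYED_WRITEBACK also
 * wait until a batch has been processed or the backlog has drained.
 */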
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

Jeff Mahoney79787ea2012-03-12 16:03:00 +01001388/* Will return 0 or -ENOMEM */
Miao Xie16cdcec2011-04-22 18:12:22 +08001389int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001390 struct btrfs_fs_info *fs_info,
1391 const char *name, int name_len,
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001392 struct btrfs_inode *dir,
Miao Xie16cdcec2011-04-22 18:12:22 +08001393 struct btrfs_disk_key *disk_key, u8 type,
1394 u64 index)
1395{
1396 struct btrfs_delayed_node *delayed_node;
1397 struct btrfs_delayed_item *delayed_item;
1398 struct btrfs_dir_item *dir_item;
1399 int ret;
1400
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001401 delayed_node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001402 if (IS_ERR(delayed_node))
1403 return PTR_ERR(delayed_node);
1404
1405 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1406 if (!delayed_item) {
1407 ret = -ENOMEM;
1408 goto release_node;
1409 }
1410
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001411 delayed_item->key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001412 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001413 delayed_item->key.offset = index;
1414
1415 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1416 dir_item->location = *disk_key;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001417 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1418 btrfs_set_stack_dir_data_len(dir_item, 0);
1419 btrfs_set_stack_dir_name_len(dir_item, name_len);
1420 btrfs_set_stack_dir_type(dir_item, type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001421 memcpy((char *)(dir_item + 1), name, name_len);
1422
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001423 ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
Josef Bacik8c2a3ca2012-01-10 10:31:31 -05001424	/*
1425	 * We reserved enough space when we started a new transaction, so a
1426	 * metadata reservation failure here is impossible.
1427	 */
1428 BUG_ON(ret);
1429
1430
Miao Xie16cdcec2011-04-22 18:12:22 +08001431 mutex_lock(&delayed_node->mutex);
1432 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1433 if (unlikely(ret)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001434 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001435		  "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1436 name_len, name, delayed_node->root->objectid,
1437 delayed_node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001438 BUG();
1439 }
1440 mutex_unlock(&delayed_node->mutex);
1441
1442release_node:
1443 btrfs_release_delayed_node(delayed_node);
1444 return ret;
1445}
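
/*
 * A hedged sketch (not from this file) of the "header plus trailing
 * name" layout the insertion path above relies on: the dir item and
 * its name share a single allocation of sizeof(*dir_item) + name_len,
 * and the name is copied to the bytes right after the struct.  The
 * sketch_dir_item type is a simplified stand-in, not the on-disk
 * btrfs definition.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct sketch_dir_item {
	uint64_t transid;
	uint16_t data_len;
	uint16_t name_len;
	uint8_t  type;
	/* name bytes follow immediately after the struct */
};

static struct sketch_dir_item *sketch_make_dir_item(const char *name,
						    uint16_t name_len,
						    uint8_t type,
						    uint64_t transid)
{
	struct sketch_dir_item *di;

	/* one allocation covers the header and the variable-size name */
	di = malloc(sizeof(*di) + name_len);
	if (!di)
		return NULL;

	di->transid = transid;
	di->data_len = 0;
	di->name_len = name_len;
	di->type = type;
	memcpy((char *)(di + 1), name, name_len);
	return di;
}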
1446
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001447static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
Miao Xie16cdcec2011-04-22 18:12:22 +08001448 struct btrfs_delayed_node *node,
1449 struct btrfs_key *key)
1450{
1451 struct btrfs_delayed_item *item;
1452
1453 mutex_lock(&node->mutex);
1454 item = __btrfs_lookup_delayed_insertion_item(node, key);
1455 if (!item) {
1456 mutex_unlock(&node->mutex);
1457 return 1;
1458 }
1459
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001460 btrfs_delayed_item_release_metadata(fs_info, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001461 btrfs_release_delayed_item(item);
1462 mutex_unlock(&node->mutex);
1463 return 0;
1464}
1465
1466int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001467 struct btrfs_fs_info *fs_info,
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001468 struct btrfs_inode *dir, u64 index)
Miao Xie16cdcec2011-04-22 18:12:22 +08001469{
1470 struct btrfs_delayed_node *node;
1471 struct btrfs_delayed_item *item;
1472 struct btrfs_key item_key;
1473 int ret;
1474
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001475 node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001476 if (IS_ERR(node))
1477 return PTR_ERR(node);
1478
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001479 item_key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001480 item_key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001481 item_key.offset = index;
1482
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001483 ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
Miao Xie16cdcec2011-04-22 18:12:22 +08001484 if (!ret)
1485 goto end;
1486
1487 item = btrfs_alloc_delayed_item(0);
1488 if (!item) {
1489 ret = -ENOMEM;
1490 goto end;
1491 }
1492
1493 item->key = item_key;
1494
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001495 ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001496	/*
1497	 * We reserved enough space when we started a new transaction, so a
1498	 * metadata reservation failure here is impossible.
1499	 */
1500 BUG_ON(ret);
1501
1502 mutex_lock(&node->mutex);
1503 ret = __btrfs_add_delayed_deletion_item(node, item);
1504 if (unlikely(ret)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001505 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001506		  "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1507 index, node->root->objectid, node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001508 BUG();
1509 }
1510 mutex_unlock(&node->mutex);
1511end:
1512 btrfs_release_delayed_node(node);
1513 return ret;
1514}
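
/*
 * Sketch of the decision made by the deletion path above, under the
 * assumption that a pending insertion can simply be cancelled: only
 * when no queued insertion matches the index does a deletion item
 * need to be recorded.  Plain fixed-size arrays stand in for the
 * delayed node's rbtrees; all sketch_* names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_node {
	uint64_t ins[16]; size_t nins;	/* queued insertions (indexes) */
	uint64_t del[16]; size_t ndel;	/* queued deletions (indexes) */
};

/* Returns true if a pending insertion was cancelled, false if a
 * deletion item was queued instead (capacity checks elided). */
static bool sketch_delete_dir_index(struct sketch_node *n, uint64_t index)
{
	for (size_t i = 0; i < n->nins; i++) {
		if (n->ins[i] == index) {
			/* cancel: the on-disk item never existed */
			n->ins[i] = n->ins[--n->nins];
			return true;
		}
	}
	/* nothing queued: remember to delete the on-disk item later */
	n->del[n->ndel++] = index;
	return false;
}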
1515
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001516int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001517{
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001518 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001519
1520 if (!delayed_node)
1521 return -ENOENT;
1522
1523	/*
1524	 * Since we hold i_mutex of this directory, no new directory index
1525	 * can be added to the delayed node and index_cnt cannot be updated
1526	 * now, so we need not lock the delayed node.
1527	 */
Miao Xie2f7e33d2011-06-23 07:27:13 +00001528 if (!delayed_node->index_cnt) {
1529 btrfs_release_delayed_node(delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001530 return -EINVAL;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001531 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001532
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001533 inode->index_cnt = delayed_node->index_cnt;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001534 btrfs_release_delayed_node(delayed_node);
1535 return 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001536}
1537
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001538bool btrfs_readdir_get_delayed_items(struct inode *inode,
1539 struct list_head *ins_list,
1540 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001541{
1542 struct btrfs_delayed_node *delayed_node;
1543 struct btrfs_delayed_item *item;
1544
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001545 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001546 if (!delayed_node)
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001547 return false;
1548
1549 /*
1550 * We can only do one readdir with delayed items at a time because of
1551 * item->readdir_list.
1552 */
1553 inode_unlock_shared(inode);
1554 inode_lock(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001555
1556 mutex_lock(&delayed_node->mutex);
1557 item = __btrfs_first_delayed_insertion_item(delayed_node);
1558 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001559 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001560 list_add_tail(&item->readdir_list, ins_list);
1561 item = __btrfs_next_delayed_item(item);
1562 }
1563
1564 item = __btrfs_first_delayed_deletion_item(delayed_node);
1565 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001566 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001567 list_add_tail(&item->readdir_list, del_list);
1568 item = __btrfs_next_delayed_item(item);
1569 }
1570 mutex_unlock(&delayed_node->mutex);
1571	/*
1572	 * This delayed node is still cached in the btrfs inode, so refs
1573	 * must be > 1 now, and we need not check whether it is about to
1574	 * be freed.
1575	 *
1576	 * Besides that, this function is only used for readdir; we do not
1577	 * insert or delete delayed items during that period, so we need
1578	 * not requeue or dequeue this delayed node either.
1579	 */
Elena Reshetova6de5f182017-03-03 10:55:16 +02001580 refcount_dec(&delayed_node->refs);
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001581
1582 return true;
Miao Xie16cdcec2011-04-22 18:12:22 +08001583}
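
/*
 * Sketch of the snapshot pattern used above: with the node locked,
 * every pending item gains one extra reference and its pointer is
 * copied aside, so the caller can walk the snapshot after unlocking
 * while writers keep mutating the trees.  pthreads and C11 atomics
 * stand in for the kernel mutex and refcount_t; the sketch_* names
 * are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct sketch_item {
	atomic_int refs;
	struct sketch_item *next;
};

/* Pin up to @max items under the lock; the caller may then walk
 * snap[] without holding it.  Returns the number pinned. */
static size_t sketch_snapshot(pthread_mutex_t *lock,
			      struct sketch_item *head,
			      struct sketch_item **snap, size_t max)
{
	size_t n = 0;

	pthread_mutex_lock(lock);
	for (struct sketch_item *it = head; it && n < max; it = it->next) {
		atomic_fetch_add(&it->refs, 1);
		snap[n++] = it;
	}
	pthread_mutex_unlock(lock);
	return n;
}

/* Drop the snapshot references; free an item on its last reference,
 * as the refcount_dec_and_test() calls above do. */
static void sketch_put_snapshot(struct sketch_item **snap, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (atomic_fetch_sub(&snap[i]->refs, 1) == 1)
			free(snap[i]);
}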
1584
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001585void btrfs_readdir_put_delayed_items(struct inode *inode,
1586 struct list_head *ins_list,
1587 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001588{
1589 struct btrfs_delayed_item *curr, *next;
1590
1591 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1592 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001593 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001594 kfree(curr);
1595 }
1596
1597 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1598 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001599 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001600 kfree(curr);
1601 }
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001602
1603 /*
1604 * The VFS is going to do up_read(), so we need to downgrade back to a
1605 * read lock.
1606 */
1607 downgrade_write(&inode->i_rwsem);
Miao Xie16cdcec2011-04-22 18:12:22 +08001608}
1609
1610int btrfs_should_delete_dir_index(struct list_head *del_list,
1611 u64 index)
1612{
1613 struct btrfs_delayed_item *curr, *next;
1614 int ret;
1615
1616 if (list_empty(del_list))
1617 return 0;
1618
1619 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1620 if (curr->key.offset > index)
1621 break;
1622
1623 list_del(&curr->readdir_list);
1624 ret = (curr->key.offset == index);
1625
Elena Reshetova089e77e2017-03-03 10:55:17 +02001626 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001627 kfree(curr);
1628
1629 if (ret)
1630 return 1;
1631 else
1632 continue;
1633 }
1634 return 0;
1635}
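
/*
 * The helper above depends on del_list being sorted by index: entries
 * behind the cursor are consumed as the readdir position advances, and
 * a hit tells the caller to skip that directory index.  The same scan
 * over a plain sorted array, as an illustrative (hypothetical) sketch:
 */
#include <stddef.h>
#include <stdint.h>

/* @pos is the scan cursor into the sorted deletions[] array; entries
 * already passed are never revisited, mirroring the list_del() in the
 * kernel version.  Returns 1 if @index is marked for deletion. */
static int sketch_should_delete(const uint64_t *deletions, size_t n,
				size_t *pos, uint64_t index)
{
	while (*pos < n && deletions[*pos] < index)
		(*pos)++;
	if (*pos < n && deletions[*pos] == index) {
		(*pos)++;
		return 1;
	}
	return 0;
}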
1636
1637/*
1638 * btrfs_readdir_delayed_dir_index - read directory entries stored in
1639 * the delayed insertion tree
1640 */
Al Viro9cdda8d2013-05-22 16:48:09 -04001641int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
Jeff Mahoneyd2fbb2b2016-11-05 13:26:35 -04001642 struct list_head *ins_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001643{
1644 struct btrfs_dir_item *di;
1645 struct btrfs_delayed_item *curr, *next;
1646 struct btrfs_key location;
1647 char *name;
1648 int name_len;
1649 int over = 0;
1650 unsigned char d_type;
1651
1652 if (list_empty(ins_list))
1653 return 0;
1654
1655	/*
1656	 * The data of a delayed item never changes, so we need not lock
1657	 * the items.  And since we hold i_mutex of the directory, nobody
1658	 * can delete any directory index now.
1659	 */
1660 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1661 list_del(&curr->readdir_list);
1662
Al Viro9cdda8d2013-05-22 16:48:09 -04001663 if (curr->key.offset < ctx->pos) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001664 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001665 kfree(curr);
1666 continue;
1667 }
1668
Al Viro9cdda8d2013-05-22 16:48:09 -04001669 ctx->pos = curr->key.offset;
Miao Xie16cdcec2011-04-22 18:12:22 +08001670
1671 di = (struct btrfs_dir_item *)curr->data;
1672 name = (char *)(di + 1);
Qu Wenruo3cae2102013-07-16 11:19:18 +08001673 name_len = btrfs_stack_dir_name_len(di);
Miao Xie16cdcec2011-04-22 18:12:22 +08001674
1675 d_type = btrfs_filetype_table[di->type];
1676 btrfs_disk_key_to_cpu(&location, &di->location);
1677
Al Viro9cdda8d2013-05-22 16:48:09 -04001678 over = !dir_emit(ctx, name, name_len,
Miao Xie16cdcec2011-04-22 18:12:22 +08001679 location.objectid, d_type);
1680
Elena Reshetova089e77e2017-03-03 10:55:17 +02001681 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001682 kfree(curr);
1683
1684 if (over)
1685 return 1;
Josef Bacik42e9cc42017-07-24 15:14:26 -04001686 ctx->pos++;
Miao Xie16cdcec2011-04-22 18:12:22 +08001687 }
1688 return 0;
1689}
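
/*
 * Sketch of the emission loop above: the position cursor steps past
 * each entry that was handed to the consumer, and the walk stops as
 * soon as the consumer's buffer fills, so a later call can resume at
 * the same offset.  The emit() callback is a hypothetical stand-in
 * for dir_emit().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_dirent {
	uint64_t index;
	const char *name;
};

/* Returns 1 if the consumer ran out of buffer space mid-walk. */
static int sketch_emit_pending(const struct sketch_dirent *ents, size_t n,
			       uint64_t *pos,
			       bool (*emit)(const struct sketch_dirent *))
{
	for (size_t i = 0; i < n; i++) {
		if (ents[i].index < *pos)
			continue;	/* consumed by an earlier call */
		*pos = ents[i].index;
		if (!emit(&ents[i]))
			return 1;	/* buffer full: resume here later */
		(*pos)++;		/* step past the emitted entry */
	}
	return 0;
}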
1690
Miao Xie16cdcec2011-04-22 18:12:22 +08001691static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1692 struct btrfs_inode_item *inode_item,
1693 struct inode *inode)
1694{
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001695 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1696 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001697 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1698 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1699 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1700 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1701 btrfs_set_stack_inode_generation(inode_item,
1702 BTRFS_I(inode)->generation);
Josef Bacik0c4d2d92012-04-05 15:03:02 -04001703 btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
Miao Xie16cdcec2011-04-22 18:12:22 +08001704 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1705 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1706 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
Chris Masonff5714c2011-05-28 07:00:39 -04001707 btrfs_set_stack_inode_block_group(inode_item, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001708
David Sterbaa937b972014-12-12 17:39:12 +01001709 btrfs_set_stack_timespec_sec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001710 inode->i_atime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001711 btrfs_set_stack_timespec_nsec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001712 inode->i_atime.tv_nsec);
1713
David Sterbaa937b972014-12-12 17:39:12 +01001714 btrfs_set_stack_timespec_sec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001715 inode->i_mtime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001716 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001717 inode->i_mtime.tv_nsec);
1718
David Sterbaa937b972014-12-12 17:39:12 +01001719 btrfs_set_stack_timespec_sec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001720 inode->i_ctime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001721 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001722 inode->i_ctime.tv_nsec);
chandan r9cc97d62012-07-04 12:48:07 +05301723
1724 btrfs_set_stack_timespec_sec(&inode_item->otime,
1725 BTRFS_I(inode)->i_otime.tv_sec);
1726 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1727 BTRFS_I(inode)->i_otime.tv_nsec);
Miao Xie16cdcec2011-04-22 18:12:22 +08001728}
1729
Miao Xie2f7e33d2011-06-23 07:27:13 +00001730int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1731{
1732 struct btrfs_delayed_node *delayed_node;
1733 struct btrfs_inode_item *inode_item;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001734
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001735 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001736 if (!delayed_node)
1737 return -ENOENT;
1738
1739 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001740 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie2f7e33d2011-06-23 07:27:13 +00001741 mutex_unlock(&delayed_node->mutex);
1742 btrfs_release_delayed_node(delayed_node);
1743 return -ENOENT;
1744 }
1745
1746 inode_item = &delayed_node->inode_item;
1747
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001748 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1749 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
Nikolay Borisov6ef06d22017-02-20 13:50:34 +02001750 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001751 inode->i_mode = btrfs_stack_inode_mode(inode_item);
Miklos Szeredibfe86842011-10-28 14:13:29 +02001752 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001753 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1754 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
Yang Dongsheng6e17d302015-04-09 12:08:43 +08001755 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1756
Josef Bacik0c4d2d92012-04-05 15:03:02 -04001757 inode->i_version = btrfs_stack_inode_sequence(inode_item);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001758 inode->i_rdev = 0;
1759 *rdev = btrfs_stack_inode_rdev(inode_item);
1760 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1761
David Sterbaa937b972014-12-12 17:39:12 +01001762 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1763 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001764
David Sterbaa937b972014-12-12 17:39:12 +01001765 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1766 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001767
David Sterbaa937b972014-12-12 17:39:12 +01001768 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1769 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001770
chandan r9cc97d62012-07-04 12:48:07 +05301771 BTRFS_I(inode)->i_otime.tv_sec =
1772 btrfs_stack_timespec_sec(&inode_item->otime);
1773 BTRFS_I(inode)->i_otime.tv_nsec =
1774 btrfs_stack_timespec_nsec(&inode_item->otime);
1775
Miao Xie2f7e33d2011-06-23 07:27:13 +00001776 inode->i_generation = BTRFS_I(inode)->generation;
1777 BTRFS_I(inode)->index_cnt = (u64)-1;
1778
1779 mutex_unlock(&delayed_node->mutex);
1780 btrfs_release_delayed_node(delayed_node);
1781 return 0;
1782}
1783
Miao Xie16cdcec2011-04-22 18:12:22 +08001784int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1785 struct btrfs_root *root, struct inode *inode)
1786{
1787 struct btrfs_delayed_node *delayed_node;
David Sterbaaa0467d2011-06-03 16:29:08 +02001788 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001789
Nikolay Borisove5517a72017-01-10 20:35:33 +02001790 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001791 if (IS_ERR(delayed_node))
1792 return PTR_ERR(delayed_node);
1793
1794 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001795 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001796 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1797 goto release_node;
1798 }
1799
Nikolay Borisovfcabdd12017-01-10 20:35:34 +02001800 ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
Josef Bacik7fd2ae22011-11-08 15:47:34 -05001801 delayed_node);
Josef Bacikc06a0e12011-11-04 19:56:02 -04001802 if (ret)
1803 goto release_node;
Miao Xie16cdcec2011-04-22 18:12:22 +08001804
1805 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
Miao Xie7cf35d92013-12-26 13:07:05 +08001806 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
Miao Xie16cdcec2011-04-22 18:12:22 +08001807 delayed_node->count++;
1808 atomic_inc(&root->fs_info->delayed_root->items);
1809release_node:
1810 mutex_unlock(&delayed_node->mutex);
1811 btrfs_release_delayed_node(delayed_node);
1812 return ret;
1813}
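
/*
 * Sketch of the mark-dirty-once pattern in the function above: the
 * first update of an inode pays for the metadata reservation and bumps
 * the pending-item count, while every later update before the flush
 * only rewrites the cached copy.  Field and function names here are
 * simplified, hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>

struct sketch_inode_node {
	pthread_mutex_t lock;
	bool inode_dirty;	/* BTRFS_DELAYED_NODE_INODE_DIRTY */
	int cached_item;	/* stands in for delayed_node->inode_item */
	int *pending_items;	/* global pending-work counter */
};

static void sketch_update_inode(struct sketch_inode_node *n, int new_item)
{
	pthread_mutex_lock(&n->lock);
	if (n->inode_dirty) {
		/* fast path: just refresh the cached item */
		n->cached_item = new_item;
		pthread_mutex_unlock(&n->lock);
		return;
	}

	/* first dirtying: the reservation happens here (elided) */
	n->cached_item = new_item;
	n->inode_dirty = true;
	(*n->pending_items)++;
	pthread_mutex_unlock(&n->lock);
}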
1814
Nikolay Borisove07222c2017-01-10 20:35:37 +02001815int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
Miao Xie67de1172013-12-26 13:07:06 +08001816{
Nikolay Borisove07222c2017-01-10 20:35:37 +02001817 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
Miao Xie67de1172013-12-26 13:07:06 +08001818 struct btrfs_delayed_node *delayed_node;
1819
Chris Mason6f896052014-12-31 12:18:29 -05001820	/*
1821	 * We don't do delayed inode updates during log recovery because they
1822	 * lead to ENOSPC problems.  This means we can't do delayed inode
1823	 * refs either.
1824	 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001825 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
Chris Mason6f896052014-12-31 12:18:29 -05001826 return -EAGAIN;
1827
Nikolay Borisove07222c2017-01-10 20:35:37 +02001828 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie67de1172013-12-26 13:07:06 +08001829 if (IS_ERR(delayed_node))
1830 return PTR_ERR(delayed_node);
1831
1832	/*
1833	 * We don't reserve space for inode ref deletion because:
1834	 * - We ONLY do async inode ref deletion for an inode that has only
1835	 *   one link (i_nlink == 1), which means there is only one inode ref.
1836	 *   In most cases the inode ref and the inode item are in the
1837	 *   same leaf, and we deal with them at the same time.
1838	 *   Since we are sure we will reserve space for the inode item,
1839	 *   it is unnecessary to reserve space for the inode ref deletion
1840	 *   as well.
1841	 * - If the inode ref and the inode item are not in the same leaf,
1842	 *   we still need not worry about ENOSPC, because we reserve much
1843	 *   more space for the inode update than it needs.
1844	 * - At worst, we can steal some space from the global reservation.
1845	 *   That is very rare.
1846	 */
1846 mutex_lock(&delayed_node->mutex);
1847 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1848 goto release_node;
1849
1850 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1851 delayed_node->count++;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001852 atomic_inc(&fs_info->delayed_root->items);
Miao Xie67de1172013-12-26 13:07:06 +08001853release_node:
1854 mutex_unlock(&delayed_node->mutex);
1855 btrfs_release_delayed_node(delayed_node);
1856 return 0;
1857}
1858
Miao Xie16cdcec2011-04-22 18:12:22 +08001859static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1860{
1861 struct btrfs_root *root = delayed_node->root;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001862 struct btrfs_fs_info *fs_info = root->fs_info;
Miao Xie16cdcec2011-04-22 18:12:22 +08001863 struct btrfs_delayed_item *curr_item, *prev_item;
1864
1865 mutex_lock(&delayed_node->mutex);
1866 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1867 while (curr_item) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001868 btrfs_delayed_item_release_metadata(fs_info, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001869 prev_item = curr_item;
1870 curr_item = __btrfs_next_delayed_item(prev_item);
1871 btrfs_release_delayed_item(prev_item);
1872 }
1873
1874 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1875 while (curr_item) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001876 btrfs_delayed_item_release_metadata(fs_info, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001877 prev_item = curr_item;
1878 curr_item = __btrfs_next_delayed_item(prev_item);
1879 btrfs_release_delayed_item(prev_item);
1880 }
1881
Miao Xie67de1172013-12-26 13:07:06 +08001882 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1883 btrfs_release_delayed_iref(delayed_node);
1884
Miao Xie7cf35d92013-12-26 13:07:05 +08001885 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001886 btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001887 btrfs_release_delayed_inode(delayed_node);
1888 }
1889 mutex_unlock(&delayed_node->mutex);
1890}
1891
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001892void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001893{
1894 struct btrfs_delayed_node *delayed_node;
1895
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001896 delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001897 if (!delayed_node)
1898 return;
1899
1900 __btrfs_kill_delayed_node(delayed_node);
1901 btrfs_release_delayed_node(delayed_node);
1902}
1903
1904void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1905{
1906 u64 inode_id = 0;
1907 struct btrfs_delayed_node *delayed_nodes[8];
1908 int i, n;
1909
1910 while (1) {
1911 spin_lock(&root->inode_lock);
1912 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1913 (void **)delayed_nodes, inode_id,
1914 ARRAY_SIZE(delayed_nodes));
1915 if (!n) {
1916 spin_unlock(&root->inode_lock);
1917 break;
1918 }
1919
1920 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1921
1922 for (i = 0; i < n; i++)
Elena Reshetova6de5f182017-03-03 10:55:16 +02001923 refcount_inc(&delayed_nodes[i]->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001924 spin_unlock(&root->inode_lock);
1925
1926 for (i = 0; i < n; i++) {
1927 __btrfs_kill_delayed_node(delayed_nodes[i]);
1928 btrfs_release_delayed_node(delayed_nodes[i]);
1929 }
1930 }
1931}
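
/*
 * Sketch of the batched-lookup loop above: fetch a window of nodes
 * starting at a cursor, pin each with a reference while the lock is
 * held, process them unlocked, then restart just past the last id
 * seen.  A sorted array stands in for the radix tree; all sketch_*
 * names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_dnode {
	uint64_t id;
	int refs;
};

/* Collect at most @max nodes with id >= *cursor into batch[], taking
 * a reference on each, and advance *cursor past the last one found.
 * The caller is expected to hold the tree lock, as the kernel code
 * holds root->inode_lock around radix_tree_gang_lookup(). */
static size_t sketch_gang_lookup(struct sketch_dnode *nodes, size_t n,
				 uint64_t *cursor,
				 struct sketch_dnode **batch, size_t max)
{
	size_t out = 0;

	for (size_t i = 0; i < n && out < max; i++) {
		if (nodes[i].id < *cursor)
			continue;
		nodes[i].refs++;	/* pin before the lock is dropped */
		batch[out++] = &nodes[i];
	}
	if (out)
		*cursor = batch[out - 1]->id + 1;
	return out;
}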
Miao Xie67cde342012-06-14 02:23:22 -06001932
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001933void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
Miao Xie67cde342012-06-14 02:23:22 -06001934{
Miao Xie67cde342012-06-14 02:23:22 -06001935 struct btrfs_delayed_node *curr_node, *prev_node;
1936
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001937 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
Miao Xie67cde342012-06-14 02:23:22 -06001938 while (curr_node) {
1939 __btrfs_kill_delayed_node(curr_node);
1940
1941 prev_node = curr_node;
1942 curr_node = btrfs_next_delayed_node(curr_node);
1943 btrfs_release_delayed_node(prev_node);
1944 }
1945}
1946