/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
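
/*
 * Rough meaning of the knobs above, as they are used further down in this
 * file: once the number of pending delayed items passes
 * BTRFS_DELAYED_BACKGROUND, asynchronous flushing is kicked off; once it
 * passes BTRFS_DELAYED_WRITEBACK, callers of btrfs_balance_delayed_items()
 * also wait for the flusher; BTRFS_DELAYED_BATCH is the granularity used
 * for waking up waiters and for sizing one asynchronous flushing pass.
 */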

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

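/*
 * Look up the in-memory delayed node for @btrfs_inode and take a reference
 * on it.  The fast path uses the pointer cached in the inode; otherwise the
 * node is looked up in the per-root radix tree (delayed_nodes_tree).
 * Returns NULL when the inode has no delayed node.
 */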
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

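/*
 * Link @ins into the insertion or deletion rb-tree of @delayed_node
 * (keyed by btrfs_key) and account it in the per-node and global item
 * counters.  Returns -EEXIST if an item with the same key is already
 * queued.
 */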
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

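/*
 * Account one completed delayed item: bump items_seq, drop the global item
 * count and wake up waiters in btrfs_balance_delayed_items() either every
 * BTRFS_DELAYED_BATCH completions or once the count falls below
 * BTRFS_DELAYED_BACKGROUND.
 */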
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

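/*
 * Metadata space for a queued delayed item is carved out of the
 * transaction's block reserve and parked in fs_info->delayed_block_rsv
 * until the item is flushed (see the matching release helper below).
 */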
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertion that needn't extend the item for
 * new data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

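/*
 * Delete a run of adjacent dir index items from the leaf that @path points
 * at with a single btrfs_del_items() call, then drop the corresponding
 * delayed items and their reservations.
 */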
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

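/*
 * Flush everything queued on one delayed node: first the pending
 * insertions, then the pending deletions, and finally the delayed inode
 * item update itself.
 */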
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

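/*
 * Flush only the delayed inode item (not the queued dir index items) for
 * @inode, joining a transaction just for that update.
 */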
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

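/*
 * Work item handed to fs_info->delayed_workers.  nr limits how many
 * delayed nodes one asynchronous pass may flush; nr == 0 means "keep going
 * until the backlog is back under control" (see
 * btrfs_async_run_delayed_root below).
 */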
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

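/*
 * Tell a waiter in btrfs_balance_delayed_items() whether it can stop
 * waiting: either roughly BTRFS_DELAYED_BATCH more items have completed
 * since it went to sleep (items_seq moved on), or the backlog has dropped
 * below BTRFS_DELAYED_BACKGROUND.
 */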
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

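/*
 * Throttle producers of delayed items.  Below BTRFS_DELAYED_BACKGROUND
 * nothing is done; above it an asynchronous flush is kicked off; above
 * BTRFS_DELAYED_WRITEBACK the caller also waits until could_end_wait()
 * says enough progress has been made.
 */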
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001386void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
Miao Xie16cdcec2011-04-22 18:12:22 +08001387{
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001388 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001389
1390 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1391 return;
1392
1393 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
Miao Xie03538082013-12-26 13:07:03 +08001394 int seq;
Miao Xie16cdcec2011-04-22 18:12:22 +08001395 int ret;
Miao Xie03538082013-12-26 13:07:03 +08001396
1397 seq = atomic_read(&delayed_root->items_seq);
Chris Masonde3cb942013-03-04 17:13:31 -05001398
Daniel Dresslera585e942014-11-17 22:05:02 +09001399 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001400 if (ret)
1401 return;
1402
Miao Xie03538082013-12-26 13:07:03 +08001403 wait_event_interruptible(delayed_root->wait,
1404 could_end_wait(delayed_root, seq));
Miao Xie4dd466d2013-12-26 13:07:02 +08001405 return;
Miao Xie16cdcec2011-04-22 18:12:22 +08001406 }
1407
Daniel Dresslera585e942014-11-17 22:05:02 +09001408 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
Miao Xie16cdcec2011-04-22 18:12:22 +08001409}
1410
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001411/* Will return 0 or -ENOMEM */
Miao Xie16cdcec2011-04-22 18:12:22 +08001412int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001413 struct btrfs_fs_info *fs_info,
1414 const char *name, int name_len,
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001415 struct btrfs_inode *dir,
Miao Xie16cdcec2011-04-22 18:12:22 +08001416 struct btrfs_disk_key *disk_key, u8 type,
1417 u64 index)
1418{
1419 struct btrfs_delayed_node *delayed_node;
1420 struct btrfs_delayed_item *delayed_item;
1421 struct btrfs_dir_item *dir_item;
1422 int ret;
1423
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001424 delayed_node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001425 if (IS_ERR(delayed_node))
1426 return PTR_ERR(delayed_node);
1427
1428 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1429 if (!delayed_item) {
1430 ret = -ENOMEM;
1431 goto release_node;
1432 }
1433
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001434 delayed_item->key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001435 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001436 delayed_item->key.offset = index;
1437
1438 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1439 dir_item->location = *disk_key;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001440 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1441 btrfs_set_stack_dir_data_len(dir_item, 0);
1442 btrfs_set_stack_dir_name_len(dir_item, name_len);
1443 btrfs_set_stack_dir_type(dir_item, type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001444 memcpy((char *)(dir_item + 1), name, name_len);
1445
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001446 ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
Josef Bacik8c2a3ca2012-01-10 10:31:31 -05001447 /*
1448	 * We have reserved enough space when we started the transaction,
1449	 * so this metadata reservation cannot fail.
1450 */
1451 BUG_ON(ret);
1452
Miao Xie16cdcec2011-04-22 18:12:22 +08001454 mutex_lock(&delayed_node->mutex);
1455 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1456 if (unlikely(ret)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001457 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001458	"failed to add delayed dir index item (name: %.*s) to the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1459 name_len, name, delayed_node->root->objectid,
1460 delayed_node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001461 BUG();
1462 }
1463 mutex_unlock(&delayed_node->mutex);
1464
1465release_node:
1466 btrfs_release_delayed_node(delayed_node);
1467 return ret;
1468}
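
/*
 * Illustrative, standalone sketch (not kernel code) of the item layout used
 * above: the fixed-size directory entry header and its name share a single
 * allocation, with the name copied right behind the header, which is what
 * memcpy((char *)(dir_item + 1), name, name_len) relies on.  struct
 * demo_dir_item is a hypothetical simplification, not the on-disk format.
 */
#if 0	/* standalone example, excluded from the build */
#include <stdlib.h>
#include <string.h>

struct demo_dir_item {
	unsigned short	data_len;
	unsigned short	name_len;
	unsigned char	type;
};

static struct demo_dir_item *demo_alloc_dir_item(const char *name,
						 unsigned short name_len,
						 unsigned char type)
{
	/* one allocation holds both the header and the name bytes */
	struct demo_dir_item *item = malloc(sizeof(*item) + name_len);

	if (!item)
		return NULL;
	item->data_len = 0;
	item->name_len = name_len;
	item->type = type;
	/* the name lives immediately after the fixed-size header */
	memcpy((char *)(item + 1), name, name_len);
	return item;
}
#endif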
1469
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001470static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
Miao Xie16cdcec2011-04-22 18:12:22 +08001471 struct btrfs_delayed_node *node,
1472 struct btrfs_key *key)
1473{
1474 struct btrfs_delayed_item *item;
1475
1476 mutex_lock(&node->mutex);
1477 item = __btrfs_lookup_delayed_insertion_item(node, key);
1478 if (!item) {
1479 mutex_unlock(&node->mutex);
1480 return 1;
1481 }
1482
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001483 btrfs_delayed_item_release_metadata(fs_info, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001484 btrfs_release_delayed_item(item);
1485 mutex_unlock(&node->mutex);
1486 return 0;
1487}
1488
1489int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001490 struct btrfs_fs_info *fs_info,
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001491 struct btrfs_inode *dir, u64 index)
Miao Xie16cdcec2011-04-22 18:12:22 +08001492{
1493 struct btrfs_delayed_node *node;
1494 struct btrfs_delayed_item *item;
1495 struct btrfs_key item_key;
1496 int ret;
1497
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001498 node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001499 if (IS_ERR(node))
1500 return PTR_ERR(node);
1501
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001502 item_key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001503 item_key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001504 item_key.offset = index;
1505
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001506 ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
Miao Xie16cdcec2011-04-22 18:12:22 +08001507 if (!ret)
1508 goto end;
1509
1510 item = btrfs_alloc_delayed_item(0);
1511 if (!item) {
1512 ret = -ENOMEM;
1513 goto end;
1514 }
1515
1516 item->key = item_key;
1517
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001518 ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001519 /*
1520	 * We have reserved enough space when we started the transaction,
1521	 * so this metadata reservation cannot fail.
1522 */
1523 BUG_ON(ret);
1524
1525 mutex_lock(&node->mutex);
1526 ret = __btrfs_add_delayed_deletion_item(node, item);
1527 if (unlikely(ret)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001528 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001529	"failed to add delayed dir index item (index: %llu) to the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1530 index, node->root->objectid, node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001531 BUG();
1532 }
1533 mutex_unlock(&node->mutex);
1534end:
1535 btrfs_release_delayed_node(node);
1536 return ret;
1537}
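
/*
 * Illustrative, standalone sketch (not kernel code) of the cancel-or-queue
 * logic above: deleting a directory index first tries to drop a still
 * pending insertion with the same key, so that neither item ever reaches
 * the tree; only when no such insertion exists is a deletion item queued.
 * The demo_* callbacks are hypothetical stand-ins for the lookup and
 * queueing helpers.
 */
#if 0	/* standalone example, excluded from the build */
#include <stdbool.h>

static int demo_delete_dir_index(unsigned long long index,
				 bool (*cancel_pending_insert)(unsigned long long),
				 int (*queue_deletion)(unsigned long long))
{
	/* a pending insertion with the same index is simply dropped */
	if (cancel_pending_insert(index))
		return 0;

	/* otherwise remember that the on-disk index must be removed later */
	return queue_deletion(index);
}
#endif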
1538
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001539int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001540{
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001541 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001542
1543 if (!delayed_node)
1544 return -ENOENT;
1545
1546 /*
1547	 * Since we hold the i_mutex of this directory, no new directory index
1548	 * can be added to the delayed node and index_cnt cannot change under
1549	 * us, so we don't need to lock the delayed node.
1550 */
Miao Xie2f7e33d2011-06-23 07:27:13 +00001551 if (!delayed_node->index_cnt) {
1552 btrfs_release_delayed_node(delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001553 return -EINVAL;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001554 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001555
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001556 inode->index_cnt = delayed_node->index_cnt;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001557 btrfs_release_delayed_node(delayed_node);
1558 return 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001559}
1560
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001561bool btrfs_readdir_get_delayed_items(struct inode *inode,
1562 struct list_head *ins_list,
1563 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001564{
1565 struct btrfs_delayed_node *delayed_node;
1566 struct btrfs_delayed_item *item;
1567
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001568 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001569 if (!delayed_node)
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001570 return false;
1571
1572 /*
1573 * We can only do one readdir with delayed items at a time because of
1574 * item->readdir_list.
1575 */
1576 inode_unlock_shared(inode);
1577 inode_lock(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001578
1579 mutex_lock(&delayed_node->mutex);
1580 item = __btrfs_first_delayed_insertion_item(delayed_node);
1581 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001582 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001583 list_add_tail(&item->readdir_list, ins_list);
1584 item = __btrfs_next_delayed_item(item);
1585 }
1586
1587 item = __btrfs_first_delayed_deletion_item(delayed_node);
1588 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001589 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001590 list_add_tail(&item->readdir_list, del_list);
1591 item = __btrfs_next_delayed_item(item);
1592 }
1593 mutex_unlock(&delayed_node->mutex);
1594 /*
1595	 * This delayed node is still cached in the btrfs inode, so refs
1596	 * must be > 1 now and we don't need to check whether it is about
1597	 * to be freed.
1598	 *
1599	 * Besides that, this function is only called for readdir, and no
1600	 * delayed items are inserted or deleted during it, so we don't
1601	 * need to requeue or dequeue this delayed node either.
1602 */
Elena Reshetova6de5f182017-03-03 10:55:16 +02001603 refcount_dec(&delayed_node->refs);
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001604
1605 return true;
Miao Xie16cdcec2011-04-22 18:12:22 +08001606}
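
/*
 * Illustrative, standalone sketch (not kernel code) of the snapshot pattern
 * above: while the node's mutex is held, every delayed item gets an extra
 * reference and is collected on a private list, so the caller can walk the
 * list after the lock is dropped; whoever drops the last reference frees the
 * item.  The demo_* types use plain, non-atomic counters for brevity.
 */
#if 0	/* standalone example, excluded from the build */
#include <stdlib.h>

struct demo_item {
	int refs;
	struct demo_item *next;		/* link in the node's item list */
	struct demo_item *snap;		/* link in the caller's snapshot */
};

/* called with the node's lock held */
static struct demo_item *demo_snapshot_items(struct demo_item *head)
{
	struct demo_item *snap = NULL, *item;

	for (item = head; item; item = item->next) {
		item->refs++;		/* keep the item alive for the walker */
		item->snap = snap;
		snap = item;
	}
	return snap;
}

/* called without the lock, once the walk is finished */
static void demo_put_snapshot(struct demo_item *snap)
{
	while (snap) {
		struct demo_item *next = snap->snap;

		if (--snap->refs == 0)
			free(snap);
		snap = next;
	}
}
#endif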
1607
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001608void btrfs_readdir_put_delayed_items(struct inode *inode,
1609 struct list_head *ins_list,
1610 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001611{
1612 struct btrfs_delayed_item *curr, *next;
1613
1614 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1615 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001616 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001617 kfree(curr);
1618 }
1619
1620 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1621 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001622 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001623 kfree(curr);
1624 }
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001625
1626 /*
1627 * The VFS is going to do up_read(), so we need to downgrade back to a
1628 * read lock.
1629 */
1630 downgrade_write(&inode->i_rwsem);
Miao Xie16cdcec2011-04-22 18:12:22 +08001631}
1632
1633int btrfs_should_delete_dir_index(struct list_head *del_list,
1634 u64 index)
1635{
1636 struct btrfs_delayed_item *curr, *next;
1637 int ret;
1638
1639 if (list_empty(del_list))
1640 return 0;
1641
1642 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1643 if (curr->key.offset > index)
1644 break;
1645
1646 list_del(&curr->readdir_list);
1647 ret = (curr->key.offset == index);
1648
Elena Reshetova089e77e2017-03-03 10:55:17 +02001649 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001650 kfree(curr);
1651
1652 if (ret)
1653 return 1;
1654 else
1655 continue;
1656 }
1657 return 0;
1658}
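
/*
 * Illustrative, standalone sketch (not kernel code) of the check above:
 * del_list is sorted by key offset and readdir visits indexes in increasing
 * order, so entries below the current index can be consumed as the walk
 * advances, and an exact match means this index was deleted in the delayed
 * tree and must be skipped.  The array stands in for the real list purely
 * for illustration.
 */
#if 0	/* standalone example, excluded from the build */
#include <stdbool.h>
#include <stddef.h>

/* del[] is sorted ascending; *pos remembers how far previous calls got */
static bool demo_should_skip_index(const unsigned long long *del, size_t n,
				   size_t *pos, unsigned long long index)
{
	while (*pos < n && del[*pos] < index)
		(*pos)++;			/* stale entries we already passed */

	if (*pos < n && del[*pos] == index) {
		(*pos)++;			/* consume the match */
		return true;			/* this index was deleted */
	}
	return false;
}
#endif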
1659
1660/*
1661 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1662 */
Al Viro9cdda8d2013-05-22 16:48:09 -04001664int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
Jeff Mahoneyd2fbb2b2016-11-05 13:26:35 -04001665 struct list_head *ins_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001666{
1667 struct btrfs_dir_item *di;
1668 struct btrfs_delayed_item *curr, *next;
1669 struct btrfs_key location;
1670 char *name;
1671 int name_len;
1672 int over = 0;
1673 unsigned char d_type;
1674
1675 if (list_empty(ins_list))
1676 return 0;
1677
1678 /*
1679	 * The data of a delayed item never changes, so we don't need to
1680	 * lock the items. And since we hold the i_mutex of the directory,
1681	 * nobody can delete any directory index now.
1682 */
1683 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1684 list_del(&curr->readdir_list);
1685
Al Viro9cdda8d2013-05-22 16:48:09 -04001686 if (curr->key.offset < ctx->pos) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001687 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001688 kfree(curr);
1689 continue;
1690 }
1691
Al Viro9cdda8d2013-05-22 16:48:09 -04001692 ctx->pos = curr->key.offset;
Miao Xie16cdcec2011-04-22 18:12:22 +08001693
1694 di = (struct btrfs_dir_item *)curr->data;
1695 name = (char *)(di + 1);
Qu Wenruo3cae2102013-07-16 11:19:18 +08001696 name_len = btrfs_stack_dir_name_len(di);
Miao Xie16cdcec2011-04-22 18:12:22 +08001697
1698 d_type = btrfs_filetype_table[di->type];
1699 btrfs_disk_key_to_cpu(&location, &di->location);
1700
Al Viro9cdda8d2013-05-22 16:48:09 -04001701 over = !dir_emit(ctx, name, name_len,
Miao Xie16cdcec2011-04-22 18:12:22 +08001702 location.objectid, d_type);
1703
Elena Reshetova089e77e2017-03-03 10:55:17 +02001704 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001705 kfree(curr);
1706
1707 if (over)
1708 return 1;
Josef Bacik42e9cc42017-07-24 15:14:26 -04001709 ctx->pos++;
Miao Xie16cdcec2011-04-22 18:12:22 +08001710 }
1711 return 0;
1712}
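
/*
 * Illustrative, standalone sketch (not kernel code) of the emit loop above:
 * pending insertions are visited in index order, entries below the current
 * readdir position are dropped, and the position is advanced past every
 * entry handed to the caller.  demo_emit() is a hypothetical stand-in for
 * dir_emit() and returns false once the caller's buffer is full.
 */
#if 0	/* standalone example, excluded from the build */
#include <stdbool.h>
#include <stddef.h>

static bool demo_emit_pending(const unsigned long long *offsets, size_t n,
			      unsigned long long *ctx_pos,
			      bool (*demo_emit)(unsigned long long offset))
{
	size_t i;

	for (i = 0; i < n; i++) {		/* offsets[] is sorted ascending */
		if (offsets[i] < *ctx_pos)
			continue;		/* already returned by an earlier call */

		*ctx_pos = offsets[i];
		if (!demo_emit(offsets[i]))
			return false;		/* buffer full, resume here next time */
		(*ctx_pos)++;			/* next call starts after this entry */
	}
	return true;
}
#endif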
1713
Miao Xie16cdcec2011-04-22 18:12:22 +08001714static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1715 struct btrfs_inode_item *inode_item,
1716 struct inode *inode)
1717{
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001718 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1719 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001720 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1721 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1722 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1723 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1724 btrfs_set_stack_inode_generation(inode_item,
1725 BTRFS_I(inode)->generation);
Josef Bacik0c4d2d92012-04-05 15:03:02 -04001726 btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
Miao Xie16cdcec2011-04-22 18:12:22 +08001727 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1728 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1729 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
Chris Masonff5714c2011-05-28 07:00:39 -04001730 btrfs_set_stack_inode_block_group(inode_item, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001731
David Sterbaa937b972014-12-12 17:39:12 +01001732 btrfs_set_stack_timespec_sec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001733 inode->i_atime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001734 btrfs_set_stack_timespec_nsec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001735 inode->i_atime.tv_nsec);
1736
David Sterbaa937b972014-12-12 17:39:12 +01001737 btrfs_set_stack_timespec_sec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001738 inode->i_mtime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001739 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001740 inode->i_mtime.tv_nsec);
1741
David Sterbaa937b972014-12-12 17:39:12 +01001742 btrfs_set_stack_timespec_sec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001743 inode->i_ctime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001744 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001745 inode->i_ctime.tv_nsec);
chandan r9cc97d62012-07-04 12:48:07 +05301746
1747 btrfs_set_stack_timespec_sec(&inode_item->otime,
1748 BTRFS_I(inode)->i_otime.tv_sec);
1749 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1750 BTRFS_I(inode)->i_otime.tv_nsec);
Miao Xie16cdcec2011-04-22 18:12:22 +08001751}
1752
Miao Xie2f7e33d2011-06-23 07:27:13 +00001753int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1754{
1755 struct btrfs_delayed_node *delayed_node;
1756 struct btrfs_inode_item *inode_item;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001757
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001758 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001759 if (!delayed_node)
1760 return -ENOENT;
1761
1762 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001763 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie2f7e33d2011-06-23 07:27:13 +00001764 mutex_unlock(&delayed_node->mutex);
1765 btrfs_release_delayed_node(delayed_node);
1766 return -ENOENT;
1767 }
1768
1769 inode_item = &delayed_node->inode_item;
1770
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001771 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1772 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
Nikolay Borisov6ef06d22017-02-20 13:50:34 +02001773 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001774 inode->i_mode = btrfs_stack_inode_mode(inode_item);
Miklos Szeredibfe86842011-10-28 14:13:29 +02001775 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001776 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1777 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
Yang Dongsheng6e17d302015-04-09 12:08:43 +08001778 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1779
Josef Bacik0c4d2d92012-04-05 15:03:02 -04001780 inode->i_version = btrfs_stack_inode_sequence(inode_item);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001781 inode->i_rdev = 0;
1782 *rdev = btrfs_stack_inode_rdev(inode_item);
1783 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1784
David Sterbaa937b972014-12-12 17:39:12 +01001785 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1786 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001787
David Sterbaa937b972014-12-12 17:39:12 +01001788 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1789 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001790
David Sterbaa937b972014-12-12 17:39:12 +01001791 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1792 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001793
chandan r9cc97d62012-07-04 12:48:07 +05301794 BTRFS_I(inode)->i_otime.tv_sec =
1795 btrfs_stack_timespec_sec(&inode_item->otime);
1796 BTRFS_I(inode)->i_otime.tv_nsec =
1797 btrfs_stack_timespec_nsec(&inode_item->otime);
1798
Miao Xie2f7e33d2011-06-23 07:27:13 +00001799 inode->i_generation = BTRFS_I(inode)->generation;
1800 BTRFS_I(inode)->index_cnt = (u64)-1;
1801
1802 mutex_unlock(&delayed_node->mutex);
1803 btrfs_release_delayed_node(delayed_node);
1804 return 0;
1805}
1806
Miao Xie16cdcec2011-04-22 18:12:22 +08001807int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1808 struct btrfs_root *root, struct inode *inode)
1809{
1810 struct btrfs_delayed_node *delayed_node;
David Sterbaaa0467d2011-06-03 16:29:08 +02001811 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001812
Nikolay Borisove5517a72017-01-10 20:35:33 +02001813 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001814 if (IS_ERR(delayed_node))
1815 return PTR_ERR(delayed_node);
1816
1817 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001818 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001819 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1820 goto release_node;
1821 }
1822
Nikolay Borisovfcabdd12017-01-10 20:35:34 +02001823 ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
Josef Bacik7fd2ae22011-11-08 15:47:34 -05001824 delayed_node);
Josef Bacikc06a0e12011-11-04 19:56:02 -04001825 if (ret)
1826 goto release_node;
Miao Xie16cdcec2011-04-22 18:12:22 +08001827
1828 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
Miao Xie7cf35d92013-12-26 13:07:05 +08001829 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
Miao Xie16cdcec2011-04-22 18:12:22 +08001830 delayed_node->count++;
1831 atomic_inc(&root->fs_info->delayed_root->items);
1832release_node:
1833 mutex_unlock(&delayed_node->mutex);
1834 btrfs_release_delayed_node(delayed_node);
1835 return ret;
1836}
1837
Nikolay Borisove07222c2017-01-10 20:35:37 +02001838int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
Miao Xie67de1172013-12-26 13:07:06 +08001839{
Nikolay Borisove07222c2017-01-10 20:35:37 +02001840 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
Miao Xie67de1172013-12-26 13:07:06 +08001841 struct btrfs_delayed_node *delayed_node;
1842
Chris Mason6f896052014-12-31 12:18:29 -05001843 /*
1844	 * We don't do delayed inode updates during log recovery because they
1845	 * lead to enospc problems. This means we also can't do delayed inode
1846	 * refs.
1847 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001848 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
Chris Mason6f896052014-12-31 12:18:29 -05001849 return -EAGAIN;
1850
Nikolay Borisove07222c2017-01-10 20:35:37 +02001851 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie67de1172013-12-26 13:07:06 +08001852 if (IS_ERR(delayed_node))
1853 return PTR_ERR(delayed_node);
1854
1855 /*
1856	 * We don't reserve space for inode ref deletion because:
1857	 * - We ONLY do async inode ref deletion for inodes with a single
1858	 *   link (i_nlink == 1), which means there is only one inode ref.
1859	 *   In most cases the inode ref and the inode item are in the same
1860	 *   leaf and we deal with them at the same time. Since we are sure
1861	 *   we will reserve space for the inode item, it is unnecessary to
1862	 *   reserve space for the inode ref deletion as well.
1863	 * - If the inode ref and the inode item are not in the same leaf,
1864	 *   we still don't need to worry about enospc, because we reserve
1865	 *   much more space for the inode update than it actually needs.
1866	 * - At worst, we can steal some space from the global reservation,
1867	 *   which is very rare.
1868 */
1869 mutex_lock(&delayed_node->mutex);
1870 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1871 goto release_node;
1872
1873 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1874 delayed_node->count++;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001875 atomic_inc(&fs_info->delayed_root->items);
Miao Xie67de1172013-12-26 13:07:06 +08001876release_node:
1877 mutex_unlock(&delayed_node->mutex);
1878 btrfs_release_delayed_node(delayed_node);
1879 return 0;
1880}
1881
Miao Xie16cdcec2011-04-22 18:12:22 +08001882static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1883{
1884 struct btrfs_root *root = delayed_node->root;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001885 struct btrfs_fs_info *fs_info = root->fs_info;
Miao Xie16cdcec2011-04-22 18:12:22 +08001886 struct btrfs_delayed_item *curr_item, *prev_item;
1887
1888 mutex_lock(&delayed_node->mutex);
1889 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1890 while (curr_item) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001891 btrfs_delayed_item_release_metadata(fs_info, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001892 prev_item = curr_item;
1893 curr_item = __btrfs_next_delayed_item(prev_item);
1894 btrfs_release_delayed_item(prev_item);
1895 }
1896
1897 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1898 while (curr_item) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001899 btrfs_delayed_item_release_metadata(fs_info, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001900 prev_item = curr_item;
1901 curr_item = __btrfs_next_delayed_item(prev_item);
1902 btrfs_release_delayed_item(prev_item);
1903 }
1904
Miao Xie67de1172013-12-26 13:07:06 +08001905 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1906 btrfs_release_delayed_iref(delayed_node);
1907
Miao Xie7cf35d92013-12-26 13:07:05 +08001908 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001909 btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001910 btrfs_release_delayed_inode(delayed_node);
1911 }
1912 mutex_unlock(&delayed_node->mutex);
1913}
1914
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001915void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001916{
1917 struct btrfs_delayed_node *delayed_node;
1918
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001919 delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001920 if (!delayed_node)
1921 return;
1922
1923 __btrfs_kill_delayed_node(delayed_node);
1924 btrfs_release_delayed_node(delayed_node);
1925}
1926
1927void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1928{
1929 u64 inode_id = 0;
1930 struct btrfs_delayed_node *delayed_nodes[8];
1931 int i, n;
1932
1933 while (1) {
1934 spin_lock(&root->inode_lock);
1935 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1936 (void **)delayed_nodes, inode_id,
1937 ARRAY_SIZE(delayed_nodes));
1938 if (!n) {
1939 spin_unlock(&root->inode_lock);
1940 break;
1941 }
1942
1943 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1944
1945 for (i = 0; i < n; i++)
Elena Reshetova6de5f182017-03-03 10:55:16 +02001946 refcount_inc(&delayed_nodes[i]->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001947 spin_unlock(&root->inode_lock);
1948
1949 for (i = 0; i < n; i++) {
1950 __btrfs_kill_delayed_node(delayed_nodes[i]);
1951 btrfs_release_delayed_node(delayed_nodes[i]);
1952 }
1953 }
1954}
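
/*
 * Illustrative, standalone sketch (not kernel code) of the batched lookup
 * loop above: fetch up to eight nodes per round starting from a cursor,
 * restart the next round just past the last id returned, and stop once a
 * round comes back empty.  The radix tree is modelled by a sorted id array
 * and the demo_* names are hypothetical.
 */
#if 0	/* standalone example, excluded from the build */
#include <stddef.h>

/* return up to @max ids >= @first from the sorted array @ids */
static size_t demo_gang_lookup(const unsigned long long *ids, size_t n,
			       unsigned long long first,
			       unsigned long long *out, size_t max)
{
	size_t found = 0, i;

	for (i = 0; i < n && found < max; i++)
		if (ids[i] >= first)
			out[found++] = ids[i];
	return found;
}

static void demo_kill_all(const unsigned long long *ids, size_t n,
			  void (*kill_one)(unsigned long long))
{
	unsigned long long batch[8];
	unsigned long long cursor = 0;

	while (1) {
		size_t i, got = demo_gang_lookup(ids, n, cursor, batch, 8);

		if (!got)
			break;
		cursor = batch[got - 1] + 1;	/* resume after the last hit */
		for (i = 0; i < got; i++)
			kill_one(batch[i]);
	}
}
#endif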
Miao Xie67cde342012-06-14 02:23:22 -06001955
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001956void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
Miao Xie67cde342012-06-14 02:23:22 -06001957{
Miao Xie67cde342012-06-14 02:23:22 -06001958 struct btrfs_delayed_node *curr_node, *prev_node;
1959
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001960 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
Miao Xie67cde342012-06-14 02:23:22 -06001961 while (curr_node) {
1962 __btrfs_kill_delayed_node(curr_node);
1963
1964 prev_node = curr_node;
1965 curr_node = btrfs_next_delayed_node(curr_node);
1966 btrfs_release_delayed_node(prev_node);
1967 }
1968}
1969