/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
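
/*
 * Illustrative note (not part of the original source): f2fs keeps two
 * on-disk copies of each NAT block. A per-block bit in the NAT version
 * bitmap selects the current copy: current_nat_addr() follows that bit,
 * next_nat_addr() points at the alternate copy, and set_to_next_nat()
 * flips the bit. Copying the block to the alternate location before
 * dirtying it, as above, keeps the checkpointed copy intact until the
 * next checkpoint commits.
 */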

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}
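
/*
 * Illustrative summary (not in the original source), read straight off
 * the BUG_ON() checks above: the allowed address transitions are
 * NULL_ADDR -> NEW_ADDR (node allocated), NEW_ADDR -> a valid block
 * address (node written out), valid -> valid (node relocated on write),
 * and NEW_ADDR/valid -> NULL_ADDR (node freed; a free from a valid
 * address also increments the node version). NULL -> NULL, NEW -> NEW,
 * and valid -> NEW are rejected as inconsistencies.
 */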

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
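
/*
 * Illustrative arithmetic (not in the original source), assuming 4KB
 * blocks and a 9-byte struct f2fs_nat_entry, i.e. NAT_ENTRY_PER_BLOCK
 * = 4096 / 9 = 455: nid 1000 has START_NID(1000) = 910, so it lives at
 * slot 1000 - 910 = 90 of its NAT block, which is exactly the lookup
 * done in the "Fill node_info from nat page" step above.
 */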

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
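
/*
 * Illustrative example (not in the original source), assuming the usual
 * f2fs geometry of ADDRS_PER_INODE = 923 and ADDRS_PER_BLOCK =
 * NIDS_PER_BLOCK = 1018: file blocks 0..922 live in the inode itself
 * (level 0); block 923 is the first entry of the first direct node, so
 * get_node_path() returns level 1 with offset[] = {NODE_DIR1_BLOCK, 0};
 * block 2959 (= 923 + 2 * 1018) is the first block reached through the
 * first indirect node, giving level 2 with
 * offset[] = {NODE_IND1_BLOCK, 0, 0}.
 */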

/*
 * Caller should call f2fs_put_dnode(dn).
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			mutex_lock_op(sbi, NODE_NEW);

			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
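
/*
 * Illustrative usage sketch (not in the original source); a typical
 * caller resolves a file block to its dnode and block address like so:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE_RA);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 *
 * f2fs_put_dnode() releases the node and inode pages acquired here.
 */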

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
			int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}

int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* an i_blocks of 0 is possible, e.g. after f2fs_new_inode() fails */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);

	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_NEW);
		return PTR_ERR(page);
	}
	/* check the error before dereferencing the new page */
	init_dent_inode(name, page);
	mutex_unlock_op(sbi, NODE_NEW);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	if (read_node_page(apage, READA) == 0)
		f2fs_put_page(apage, 0);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	int err;
	struct page *page;
	struct address_space *mapping = sbi->node_inode->i_mapping;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);
	else if (PageUptodate(page))
		goto page_hit;

	err = read_node_page(page, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (PageError(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
			struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR)
		goto out;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		mutex_unlock_op(sbi, NODE_WRITE);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), a segment size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
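
/*
 * Illustrative arithmetic (not in the original source), assuming the
 * default 4KB page and 512-block segment: 512 dirty node pages * 4KB
 * = 2MB, exactly one segment, so a single flush can fill a whole node
 * segment with one large sequential write.
 */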
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First, check whether the cached NAT entries need balancing */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		write_checkpoint(sbi, false);
		return 0;
	}

	/* collect a number of dirty node pages and write them together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;

	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy, as
	 * we didn't hold free_nid_list_lock. So other threads
	 * could consume all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}
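
/*
 * Illustrative lifecycle note (not in the original source), mirroring
 * the pattern used by get_dnode_of_data() above: every successful
 * alloc_nid() must be paired with either alloc_nid_done() once the new
 * node page has been created, or alloc_nid_failed() on the error path:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page)) {
 *		alloc_nid_failed(sbi, nid);
 *		return PTR_ERR(page);
 *	}
 *	alloc_nid_done(sbi, nid);
 */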

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page for reading node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the current summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block with dirty flag, increased
			 * reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				!add_free_nid(NM_I(sbi), nid)) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
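
/*
 * Illustrative note (not in the original source): flush_nat_entries()
 * gives each dirty NAT entry one of two destinations. If the NAT
 * journal kept in the hot-data current segment summary still has room
 * (and was not itself just flushed), the entry is written there, which
 * is cheap; otherwise it is written back into its NAT block through
 * get_next_nat_page(), dirtying the alternate on-disk copy. Entries
 * whose block address became NULL_ADDR feed add_free_nid(), so freed
 * nids become reusable right after the checkpoint.
 */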

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}