/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC,
		.rw_flag = REQ_META | REQ_PRIO
	};

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}
		f2fs_submit_page_mbio(sbi, page, index, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
	f2fs_submit_merged_bio(sbi, META, READ);
}
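
/*
 * Sizing note (a sketch, assuming the usual definitions rather than values
 * visible in this file): with 4KB blocks a NAT block holds
 * NAT_ENTRY_PER_BLOCK == 455 entries and FREE_NID_PAGES == 4, so a single
 * ra_nat_pages() call pre-reads the NAT blocks covering roughly
 * 4 * 455 = 1820 nids, wrapping back to nid 0 once nid passes
 * nm_i->max_nid.
 */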

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, a stale NAT entry may remain
		 * in the NAT cache, so reinitialize it with the new
		 * information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always succeeds: it fills *ni from the NAT cache,
 * then from the NAT journal in the current summary block, and finally
 * from the on-disk NAT block.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
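
/*
 * Worked example (a sketch, assuming the common 4KB-block geometry in
 * which ADDRS_PER_INODE(fi) == 923 and ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018): for file block 2000, subtracting the 923
 * in-inode pointers leaves 1077, and subtracting the 1018 blocks under
 * NODE_DIR1_BLOCK leaves 59, so get_node_path() returns level 1 with
 * offset[0] == NODE_DIR2_BLOCK and offset[1] == 59.
 */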

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op(), unless the mode is set to RDONLY_NODE; in the
 * RDONLY_NODE case, no mutex is needed.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
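
/*
 * Typical lookup pattern for callers (an illustrative sketch, not code
 * taken verbatim from this file):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	... use dn.node_page (locked) and dn.data_blkaddr here ...
 *	f2fs_put_dnode(&dn);
 */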

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *node_mapping = sbi->node_inode->i_mapping;
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = F2FS_NODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != node_mapping)) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
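
/*
 * Flow sketch for the function above: if `from` maps to level 2 under
 * NODE_IND1_BLOCK, truncate_partial_nodes() first trims the partially
 * covered part of that subtree; the loop after skip_partial then walks
 * offset[0] forward, freeing the remaining NODE_IND1_BLOCK children and
 * the entire NODE_IND2_BLOCK and NODE_DIND_BLOCK subtrees, zeroing each
 * i_nid[] slot in the inode once its subtree is gone.
 */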

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* i_blocks can be 0 here if f2fs_new_inode() failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (ofs == XATTR_NODE_OFFSET)
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * The caller's cleanup depends on the return value:
 * 0: the caller should f2fs_put_page(page, 0)
 * LOCKED_PAGE: the caller should f2fs_put_page(page, 1)
 * error: nothing to put
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	f2fs_bug_on(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);
next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int nr_pages;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				wait_on_page_writeback(page);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &mapping->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &mapping->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB) * 3 node types is reasonable.
 */
#define COLLECT_DIRTY_NODES	1536
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long nr_to_write = wbc->nr_to_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = 3 * max_hw_blocks(sbi);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
						wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
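
/*
 * Note: free nids are gathered from two sources above: free slots found
 * while scanning on-disk NAT blocks (scan_nat_page()), and nids whose
 * journalled NAT entries in the current hot-data summary block point to
 * NULL_ADDR. Journalled entries that carry a live block address are
 * removed from the free list instead.
 */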

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as the ino as well as the nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !sbi->on_build_free_nids) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	sbi->on_build_free_nids = true;
	build_free_nids(sbi);
	sbi->on_build_free_nids = false;
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
		__del_from_free_nid_list(i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

1534int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1535{
1536 struct address_space *mapping = sbi->node_inode->i_mapping;
1537 struct f2fs_node *src, *dst;
1538 nid_t ino = ino_of_node(page);
1539 struct node_info old_ni, new_ni;
1540 struct page *ipage;
1541
1542 ipage = grab_cache_page(mapping, ino);
1543 if (!ipage)
1544 return -ENOMEM;
1545
 1546	/* Do not reuse this inode's nid from the free nid list */
1547 remove_free_nid(NM_I(sbi), ino);
1548
1549 get_node_info(sbi, ino, &old_ni);
1550 SetPageUptodate(ipage);
1551 fill_node_footer(ipage, ino, ino, 0, true);
1552
Gu Zheng45590712013-07-15 17:57:38 +08001553 src = F2FS_NODE(page);
1554 dst = F2FS_NODE(ipage);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001555
1556 memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
1557 dst->i.i_size = 0;
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001558 dst->i.i_blocks = cpu_to_le64(1);
1559 dst->i.i_links = cpu_to_le32(1);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001560 dst->i.i_xattr_nid = 0;
1561
1562 new_ni = old_ni;
1563 new_ni.ino = ino;
1564
Chao Yucfb271d2013-12-05 17:15:22 +08001565 if (unlikely(!inc_valid_node_count(sbi, NULL)))
Jaegeuk Kim65e5cd02013-05-14 15:47:43 +09001566 WARN_ON(1);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001567 set_node_addr(sbi, &new_ni, NEW_ADDR);
1568 inc_valid_inode_count(sbi);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001569 f2fs_put_page(ipage, 1);
1570 return 0;
1571}
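
/*
 * Note: this is expected to serve roll-forward recovery, which
 * reconstructs a fresh in-memory inode page from a fsynced node page
 * before the data blocks are recovered.
 */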
1572
Chao Yu9af0ff12013-11-22 15:48:54 +08001573/*
 1574 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 1575 * These pre-read pages are linked into the pages list.
1576 */
1577static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
1578 int start, int nrpages)
1579{
1580 struct page *page;
1581 int page_idx = start;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001582 struct f2fs_io_info fio = {
1583 .type = META,
1584 .rw = READ_SYNC,
1585 .rw_flag = REQ_META | REQ_PRIO
1586 };
Chao Yu9af0ff12013-11-22 15:48:54 +08001587
1588 for (; page_idx < start + nrpages; page_idx++) {
 1589		/* allocate a temporary page to read node summary info */
Chao Yua0acdfe2013-12-05 09:54:00 +08001590 page = alloc_page(GFP_F2FS_ZERO);
Chao Yu9af0ff12013-11-22 15:48:54 +08001591 if (!page) {
1592 struct page *tmp;
1593 list_for_each_entry_safe(page, tmp, pages, lru) {
1594 list_del(&page->lru);
1595 unlock_page(page);
1596 __free_pages(page, 0);
1597 }
1598 return -ENOMEM;
1599 }
1600
1601 lock_page(page);
1602 page->index = page_idx;
1603 list_add_tail(&page->lru, pages);
1604 }
1605
1606 list_for_each_entry(page, pages, lru)
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001607 f2fs_submit_page_mbio(sbi, page, page->index, &fio);
Chao Yu9af0ff12013-11-22 15:48:54 +08001608
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001609 f2fs_submit_merged_bio(sbi, META, READ);
Chao Yu9af0ff12013-11-22 15:48:54 +08001610 return 0;
1611}
1612
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001613int restore_node_summary(struct f2fs_sb_info *sbi,
1614 unsigned int segno, struct f2fs_summary_block *sum)
1615{
1616 struct f2fs_node *rn;
1617 struct f2fs_summary *sum_entry;
Chao Yu9af0ff12013-11-22 15:48:54 +08001618 struct page *page, *tmp;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001619 block_t addr;
Chao Yu9af0ff12013-11-22 15:48:54 +08001620 int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
1621 int i, last_offset, nrpages, err = 0;
1622 LIST_HEAD(page_list);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001623
1624 /* scan the node segment */
1625 last_offset = sbi->blocks_per_seg;
1626 addr = START_BLOCK(sbi, segno);
1627 sum_entry = &sum->entries[0];
1628
Chao Yu9af0ff12013-11-22 15:48:54 +08001629 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
1630 nrpages = min(last_offset - i, bio_blocks);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001631
Chao Yu9af0ff12013-11-22 15:48:54 +08001632 /* read ahead node pages */
1633 err = ra_sum_pages(sbi, &page_list, addr, nrpages);
1634 if (err)
1635 return err;
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001636
Chao Yu9af0ff12013-11-22 15:48:54 +08001637 list_for_each_entry_safe(page, tmp, &page_list, lru) {
1639 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001640 if (unlikely(!PageUptodate(page))) {
1641 err = -EIO;
1642 } else {
Chao Yu9af0ff12013-11-22 15:48:54 +08001643 rn = F2FS_NODE(page);
1644 sum_entry->nid = rn->footer.nid;
1645 sum_entry->version = 0;
1646 sum_entry->ofs_in_node = 0;
1647 sum_entry++;
Chao Yu9af0ff12013-11-22 15:48:54 +08001648 }
1649
1650 list_del(&page->lru);
1651 unlock_page(page);
1652 __free_pages(page, 0);
1653 }
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001654 }
Chao Yu9af0ff12013-11-22 15:48:54 +08001655 return err;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001656}
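
/*
 * Note: restore_node_summary() rebuilds the node summary entries of a
 * whole segment by reading its node blocks; presumably it is needed
 * only when the checkpoint did not preserve usable node summaries.
 */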
1657
1658static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1659{
1660 struct f2fs_nm_info *nm_i = NM_I(sbi);
1661 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1662 struct f2fs_summary_block *sum = curseg->sum_blk;
1663 int i;
1664
1665 mutex_lock(&curseg->curseg_mutex);
1666
1667 if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1668 mutex_unlock(&curseg->curseg_mutex);
1669 return false;
1670 }
1671
1672 for (i = 0; i < nats_in_cursum(sum); i++) {
1673 struct nat_entry *ne;
1674 struct f2fs_nat_entry raw_ne;
1675 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1676
1677 raw_ne = nat_in_journal(sum, i);
1678retry:
1679 write_lock(&nm_i->nat_tree_lock);
1680 ne = __lookup_nat_cache(nm_i, nid);
1681 if (ne) {
1682 __set_nat_cache_dirty(nm_i, ne);
1683 write_unlock(&nm_i->nat_tree_lock);
1684 continue;
1685 }
1686 ne = grab_nat_entry(nm_i, nid);
1687 if (!ne) {
1688 write_unlock(&nm_i->nat_tree_lock);
1689 goto retry;
1690 }
1691 nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
1692 nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
1693 nat_set_version(ne, raw_ne.version);
1694 __set_nat_cache_dirty(nm_i, ne);
1695 write_unlock(&nm_i->nat_tree_lock);
1696 }
1697 update_nats_in_cursum(sum, -i);
1698 mutex_unlock(&curseg->curseg_mutex);
1699 return true;
1700}
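
/*
 * flush_nat_entries() below calls flush_nats_in_journal() first: when
 * the journal still has room (the false case above), dirty NATs may be
 * written into the journal; otherwise every journalled entry has been
 * merged back into the NAT cache and must go out to the NAT blocks.
 */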
1701
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001702/*
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001703 * This function is called during the checkpointing process.
1704 */
1705void flush_nat_entries(struct f2fs_sb_info *sbi)
1706{
1707 struct f2fs_nm_info *nm_i = NM_I(sbi);
1708 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1709 struct f2fs_summary_block *sum = curseg->sum_blk;
1710 struct list_head *cur, *n;
1711 struct page *page = NULL;
1712 struct f2fs_nat_block *nat_blk = NULL;
1713 nid_t start_nid = 0, end_nid = 0;
1714 bool flushed;
1715
1716 flushed = flush_nats_in_journal(sbi);
1717
1718 if (!flushed)
1719 mutex_lock(&curseg->curseg_mutex);
1720
1721 /* 1) flush dirty nat caches */
1722 list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1723 struct nat_entry *ne;
1724 nid_t nid;
1725 struct f2fs_nat_entry raw_ne;
1726 int offset = -1;
Jaegeuk Kim2b506382012-12-26 14:39:50 +09001727 block_t new_blkaddr;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001728
1729 ne = list_entry(cur, struct nat_entry, list);
1730 nid = nat_get_nid(ne);
1731
1732 if (nat_get_blkaddr(ne) == NEW_ADDR)
1733 continue;
1734 if (flushed)
1735 goto to_nat_page;
1736
 1737		/* if there is room for nat entries in curseg->sum_blk */
1738 offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1739 if (offset >= 0) {
1740 raw_ne = nat_in_journal(sum, offset);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001741 goto flush_now;
1742 }
1743to_nat_page:
1744 if (!page || (start_nid > nid || nid > end_nid)) {
1745 if (page) {
1746 f2fs_put_page(page, 1);
1747 page = NULL;
1748 }
1749 start_nid = START_NID(nid);
1750 end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1751
1752 /*
 1753			 * get the nat block page with the dirty flag set, the
 1754			 * reference count increased, and the page mapped and locked
1755 */
1756 page = get_next_nat_page(sbi, start_nid);
1757 nat_blk = page_address(page);
1758 }
1759
Jaegeuk Kim5d56b672013-10-29 15:14:54 +09001760 f2fs_bug_on(!nat_blk);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001761 raw_ne = nat_blk->entries[nid - start_nid];
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001762flush_now:
1763 new_blkaddr = nat_get_blkaddr(ne);
1764
1765 raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1766 raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1767 raw_ne.version = nat_get_version(ne);
1768
1769 if (offset < 0) {
1770 nat_blk->entries[nid - start_nid] = raw_ne;
1771 } else {
1772 nat_in_journal(sum, offset) = raw_ne;
1773 nid_in_journal(sum, offset) = cpu_to_le32(nid);
1774 }
1775
Jaegeuk Kimfa372412013-03-21 12:53:19 +09001776 if (nat_get_blkaddr(ne) == NULL_ADDR &&
Jaegeuk Kim59bbd472013-05-07 20:47:40 +09001777 add_free_nid(NM_I(sbi), nid, false) <= 0) {
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001778 write_lock(&nm_i->nat_tree_lock);
1779 __del_from_nat_cache(nm_i, ne);
1780 write_unlock(&nm_i->nat_tree_lock);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001781 } else {
1782 write_lock(&nm_i->nat_tree_lock);
1783 __clear_nat_cache_dirty(nm_i, ne);
1784 ne->checkpointed = true;
1785 write_unlock(&nm_i->nat_tree_lock);
1786 }
1787 }
1788 if (!flushed)
1789 mutex_unlock(&curseg->curseg_mutex);
1790 f2fs_put_page(page, 1);
1791
1792 /* 2) shrink nat caches if necessary */
1793 try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1794}
1795
1796static int init_node_manager(struct f2fs_sb_info *sbi)
1797{
1798 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1799 struct f2fs_nm_info *nm_i = NM_I(sbi);
1800 unsigned char *version_bitmap;
1801 unsigned int nat_segs, nat_blocks;
1802
1803 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1804
 1805	/* segment_count_nat includes pair segments, so divide by 2. */
1806 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1807 nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
1808 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
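	/*
	 * Worked example (illustrative, assuming 4KB blocks where
	 * NAT_ENTRY_PER_BLOCK == 455): segment_count_nat == 2 and
	 * log_blocks_per_seg == 9 give nat_segs = 1 and nat_blocks = 512,
	 * so max_nid = 455 * 512 = 232960.
	 */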
1809 nm_i->fcnt = 0;
1810 nm_i->nat_cnt = 0;
1811
1812 INIT_LIST_HEAD(&nm_i->free_nid_list);
1813 INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1814 INIT_LIST_HEAD(&nm_i->nat_entries);
1815 INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1816
1817 mutex_init(&nm_i->build_lock);
1818 spin_lock_init(&nm_i->free_nid_list_lock);
1819 rwlock_init(&nm_i->nat_tree_lock);
1820
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001821 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
Alexandru Gheorghiu79b57932013-03-28 02:24:53 +02001822 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001823 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1824 if (!version_bitmap)
1825 return -EFAULT;
1826
Alexandru Gheorghiu79b57932013-03-28 02:24:53 +02001827 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1828 GFP_KERNEL);
1829 if (!nm_i->nat_bitmap)
1830 return -ENOMEM;
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001831 return 0;
1832}
1833
1834int build_node_manager(struct f2fs_sb_info *sbi)
1835{
1836 int err;
1837
1838 sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1839 if (!sbi->nm_info)
1840 return -ENOMEM;
1841
1842 err = init_node_manager(sbi);
1843 if (err)
1844 return err;
1845
1846 build_free_nids(sbi);
1847 return 0;
1848}
1849
1850void destroy_node_manager(struct f2fs_sb_info *sbi)
1851{
1852 struct f2fs_nm_info *nm_i = NM_I(sbi);
1853 struct free_nid *i, *next_i;
1854 struct nat_entry *natvec[NATVEC_SIZE];
1855 nid_t nid = 0;
1856 unsigned int found;
1857
1858 if (!nm_i)
1859 return;
1860
1861 /* destroy free nid list */
1862 spin_lock(&nm_i->free_nid_list_lock);
1863 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
Jaegeuk Kim5d56b672013-10-29 15:14:54 +09001864 f2fs_bug_on(i->state == NID_ALLOC);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001865 __del_from_free_nid_list(i);
1866 nm_i->fcnt--;
1867 }
Jaegeuk Kim5d56b672013-10-29 15:14:54 +09001868 f2fs_bug_on(nm_i->fcnt);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001869 spin_unlock(&nm_i->free_nid_list_lock);
1870
1871 /* destroy nat cache */
1872 write_lock(&nm_i->nat_tree_lock);
1873 while ((found = __gang_lookup_nat_cache(nm_i,
1874 nid, NATVEC_SIZE, natvec))) {
1875 unsigned idx;
1876 for (idx = 0; idx < found; idx++) {
1877 struct nat_entry *e = natvec[idx];
1878 nid = nat_get_nid(e) + 1;
1879 __del_from_nat_cache(nm_i, e);
1880 }
1881 }
Jaegeuk Kim5d56b672013-10-29 15:14:54 +09001882 f2fs_bug_on(nm_i->nat_cnt);
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001883 write_unlock(&nm_i->nat_tree_lock);
1884
1885 kfree(nm_i->nat_bitmap);
1886 sbi->nm_info = NULL;
1887 kfree(nm_i);
1888}
1889
Namjae Jeon6e6093a2013-01-17 00:08:30 +09001890int __init create_node_manager_caches(void)
Jaegeuk Kime05df3b2012-11-02 17:08:50 +09001891{
1892 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1893 sizeof(struct nat_entry), NULL);
1894 if (!nat_entry_slab)
1895 return -ENOMEM;
1896
1897 free_nid_slab = f2fs_kmem_cache_create("free_nid",
1898 sizeof(struct free_nid), NULL);
1899 if (!free_nid_slab) {
1900 kmem_cache_destroy(nat_entry_slab);
1901 return -ENOMEM;
1902 }
1903 return 0;
1904}
1905
1906void destroy_node_manager_caches(void)
1907{
1908 kmem_cache_destroy(free_nid_slab);
1909 kmem_cache_destroy(nat_entry_slab);
1910}
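
/*
 * Usage note (illustrative; the real init/exit pairing lives outside
 * this file): create_node_manager_caches() is expected to be called
 * once at module init and matched by destroy_node_manager_caches() on
 * the exit or error path, e.g.:
 *
 *	if (create_node_manager_caches())
 *		return -ENOMEM;
 *	...
 *	destroy_node_manager_caches();
 */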