/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous NAT entry may
		 * remain in the NAT cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
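
/*
 * Worked example for get_node_path() (illustrative; assumes the common
 * 4KB block geometry where ADDRS_PER_INODE is 923 and ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK are both 1018):
 *
 *	block 1000: 1000 - 923 = 77 < 1018, so it sits in the first direct
 *	node; level = 1, offset = {NODE_DIR1_BLOCK, 77}.
 *
 *	block 5000: 5000 - 923 - 2 * 1018 = 2041 < 1018 * 1018, so it sits
 *	under the first indirect node; level = 2,
 *	offset = {NODE_IND1_BLOCK, 2041 / 1018, 2041 % 1018}
 *	       = {NODE_IND1_BLOCK, 2, 5}.
 */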

/*
 * Caller should call f2fs_put_dnode(dn).
 */
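/*
 * Illustrative lookup sketch, mirroring the read paths elsewhere in f2fs
 * (an informal example, not a normative API contract):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;	(block address backing 'index')
 *	f2fs_put_dnode(&dn);
 */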
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			mutex_lock_op(sbi, NODE_NEW);

			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}

int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() fails */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);

	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page)) {
		/* check the error before dereferencing the page */
		mutex_unlock_op(sbi, NODE_NEW);
		return PTR_ERR(page);
	}
	init_dent_inode(name, page);
	mutex_unlock_op(sbi, NODE_NEW);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * The caller should put the page as follows, depending on the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
got_it:
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * In addition, read ahead up to MAX_RA_NODE sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR)
		goto out;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		mutex_unlock_op(sbi, NODE_WRITE);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB, i.e. one segment) is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		f2fs_sync_fs(sbi->sb, true);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
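/*
 * Allocation protocol sketch (illustrative; it mirrors the callers in this
 * file, e.g. get_dnode_of_data()):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	(put the nid back as free)
 *	else
 *		alloc_nid_done(sbi, nid);	(commit the allocation)
 */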
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy, as
	 * we didn't hold free_nid_list_lock, so another thread
	 * could have consumed all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page to read node blocks into */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for NAT entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block page with the dirty flag set,
			 * an increased reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				!add_free_nid(NM_I(sbi), nid)) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide it by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}