/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *orphan_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct page *page = NULL;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }

        /* We wait writeback only inside grab_meta_page() */
        wait_on_page_writeback(page);
        SetPageUptodate(page);
        return page;
}
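
/*
 * Typical usage, as in do_checkpoint() below (blk_addr, data and blk_size
 * are illustrative names): grab the page, fill it, mark it dirty, and drop
 * the reference so that writeback can pick it up:
 *
 *      page = grab_meta_page(sbi, blk_addr);
 *      memcpy(page_address(page), data, blk_size);
 *      set_page_dirty(page);
 *      f2fs_put_page(page, 1);
 */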

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct page *page;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        if (f2fs_readpage(sbi, page, index, READ_SYNC)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        mark_page_accessed(page);

        /* We do not allow returning an erroneous page */
        return page;
}
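/*
 * Write back a single dirty meta page. For reclaim-driven writeback, or
 * once a checkpoint error has been flagged, the page is kept dirty and
 * skipped instead of being written.
 */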
static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

        /* Should not write any meta pages if an IO error has occurred */
        if (wbc->for_reclaim ||
                        is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
                dec_page_count(sbi, F2FS_DIRTY_META);
                wbc->pages_skipped++;
                set_page_dirty(page);
                return AOP_WRITEPAGE_ACTIVATE;
        }

        wait_on_page_writeback(page);

        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
        unlock_page(page);
        return 0;
}
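/*
 * Writeback entry point for the meta inode's mapping: flush dirty meta
 * pages under cp_mutex so that writeback cannot race with an in-flight
 * checkpoint (write_checkpoint() holds the same mutex).
 */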
static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
        struct block_device *bdev = sbi->sb->s_bdev;
        long written;

        if (wbc->for_kupdate)
                return 0;

        if (get_pages(sbi, F2FS_DIRTY_META) == 0)
                return 0;

        /* if mounting failed, skip writing meta pages */
        mutex_lock(&sbi->cp_mutex);
        written = sync_meta_pages(sbi, META, bio_get_nr_vecs(bdev));
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write -= written;
        return 0;
}
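/*
 * Write out up to nr_to_write dirty meta pages and then submit the pending
 * bio for the given page type. Returns the number of pages written.
 */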
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                long nr_to_write)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        pgoff_t index = 0, end = LONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };

        pagevec_init(&pvec, 0);

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        lock_page(page);
                        BUG_ON(page->mapping != mapping);
                        BUG_ON(!PageDirty(page));
                        clear_page_dirty_for_io(page);
                        if (f2fs_write_meta_page(page, &wbc)) {
                                unlock_page(page);
                                break;
                        }
                        if (nwritten++ >= nr_to_write)
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (nwritten)
                f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX);

        return nwritten;
}
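/*
 * Mark a meta page dirty and account it under F2FS_DIRTY_META.
 * Returns 1 if the page was newly dirtied, 0 if it was already dirty.
 */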
static int f2fs_set_meta_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(sbi, F2FS_DIRTY_META);
                F2FS_SET_SB_DIRT(sbi);
                return 1;
        }
        return 0;
}

const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
};

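/*
 * Check whether another orphan inode can still be recorded in the cp pack.
 * Returns -ENOSPC once the orphan list is full.
 */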
int check_orphan_space(struct f2fs_sb_info *sbi)
{
        unsigned int max_orphans;
        int err = 0;

        /*
         * Considering 512 blocks in a segment, 5 blocks are needed for the
         * cp and log segment summaries. The remaining blocks are used to
         * keep orphan entries. With the limitation of one reserved segment
         * for the cp pack, we can have at most 507 * 1020 = 517,140 orphan
         * entries.
         */
        max_orphans = (sbi->blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK;
        mutex_lock(&sbi->orphan_inode_mutex);
        if (sbi->n_orphans >= max_orphans)
                err = -ENOSPC;
        mutex_unlock(&sbi->orphan_inode_mutex);
        return err;
}
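/*
 * Record ino as an orphan so that a later checkpoint persists it in the
 * orphan blocks of the cp pack. Duplicate entries are silently ignored.
 */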
void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct list_head *head, *this;
        struct orphan_inode_entry *new = NULL, *orphan = NULL;

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
        list_for_each(this, head) {
                orphan = list_entry(this, struct orphan_inode_entry, list);
                if (orphan->ino == ino)
                        goto out;
                if (orphan->ino > ino)
                        break;
                orphan = NULL;
        }
retry:
        new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
        if (!new) {
                cond_resched();
                goto retry;
        }
        new->ino = ino;

        /* add the new entry into the list, which is sorted by inode number */
        if (orphan)
                list_add(&new->list, this->prev);
        else
                list_add_tail(&new->list, head);

        sbi->n_orphans++;
out:
        mutex_unlock(&sbi->orphan_inode_mutex);
}
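/*
 * Drop ino from the in-memory orphan list, if present.
 */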
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct list_head *this, *next, *head;
        struct orphan_inode_entry *orphan;

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
        list_for_each_safe(this, next, head) {
                orphan = list_entry(this, struct orphan_inode_entry, list);
                if (orphan->ino == ino) {
                        list_del(&orphan->list);
                        kmem_cache_free(orphan_entry_slab, orphan);
                        sbi->n_orphans--;
                        break;
                }
        }
        mutex_unlock(&sbi->orphan_inode_mutex);
}

static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode = f2fs_iget(sbi->sb, ino);
        BUG_ON(IS_ERR(inode));
        clear_nlink(inode);

        /* truncate all the data during iput */
        iput(inode);
}
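/*
 * At mount time, walk the orphan blocks stored right after the checkpoint
 * block in the cp pack and release every inode recorded there.
 */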
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
        block_t start_blk, orphan_blkaddr, i, j;

        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return 0;

        sbi->por_doing = 1;
        start_blk = __start_cp_addr(sbi) + 1;
        orphan_blkaddr = __start_sum_addr(sbi) - 1;

        for (i = 0; i < orphan_blkaddr; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;

                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
                        recover_orphan_inode(sbi, ino);
                }
                f2fs_put_page(page, 1);
        }
        /* clear Orphan Flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
        sbi->por_doing = 0;
        return 0;
}
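/*
 * Pack the in-memory orphan list into f2fs_orphan_block pages starting at
 * start_blk, storing up to F2FS_ORPHANS_PER_BLOCK entries per block.
 */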
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
        struct list_head *head, *this, *next;
        struct f2fs_orphan_block *orphan_blk = NULL;
        struct page *page = NULL;
        unsigned int nentries = 0;
        unsigned short index = 1;
        unsigned short orphan_blocks;

        orphan_blocks = (unsigned short)((sbi->n_orphans +
                (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;

        /* loop over each orphan inode entry and write them into journal blocks */
        list_for_each_safe(this, next, head) {
                struct orphan_inode_entry *orphan;

                orphan = list_entry(this, struct orphan_inode_entry, list);

                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
                         * when an orphan block is full of 1020 entries, we
                         * need to flush the current orphan block and bring
                         * another one into memory
                         */
                        orphan_blk->blk_addr = cpu_to_le16(index);
                        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                        orphan_blk->entry_count = cpu_to_le32(nentries);
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
                        start_blk++;
                        nentries = 0;
                        page = NULL;
                }
                if (page)
                        goto page_exist;

                page = grab_meta_page(sbi, start_blk);
                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                memset(orphan_blk, 0, sizeof(*orphan_blk));
page_exist:
                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
        }
        if (!page)
                goto end;

        orphan_blk->blk_addr = cpu_to_le16(index);
        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
        orphan_blk->entry_count = cpu_to_le32(nentries);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
end:
        mutex_unlock(&sbi->orphan_inode_mutex);
}
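/*
 * Read the first and last blocks of the cp pack at cp_addr, verify the CRC
 * of each, and make sure both carry the same checkpoint version. On success
 * the version is returned through *version along with the first cp page;
 * otherwise NULL is returned.
 */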
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
{
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
        unsigned int crc = 0;
        size_t crc_offset;

        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);

        /* get the version number */
        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp1;

        crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;

        pre_version = le64_to_cpu(cp_block->checkpoint_ver);

        /* Read the 2nd cp block in this CP pack */
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        cp_page_2 = get_meta_page(sbi, cp_addr);

        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp2;

        crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;

        cur_version = le64_to_cpu(cp_block->checkpoint_ver);

        if (cur_version == pre_version) {
                *version = cur_version;
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
invalid_cp1:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
}
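/*
 * Load the newest valid checkpoint into sbi->ckpt by validating both cp
 * packs and picking the one with the later version number.
 */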
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *cp_block;
        struct f2fs_super_block *fsb = sbi->raw_super;
        struct page *cp1, *cp2, *cur_page;
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;

        sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
         * Finding the valid cp block involves reading both
         * sets (cp pack 1 and cp pack 2)
         */
        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* The second checkpoint pack should start at the next segment */
        cp_start_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2) {
                if (ver_after(cp2_version, cp1_version))
                        cur_page = cp2;
                else
                        cur_page = cp1;
        } else if (cp1) {
                cur_page = cp1;
        } else if (cp2) {
                cur_page = cp2;
        } else {
                goto fail_no_cp;
        }

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;

fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
}
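/*
 * Track a dirty dentry page: register the directory inode on
 * sbi->dir_inode_list (once per inode) and bump the dirty dentry counters.
 */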
void set_dirty_dir_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
        struct dir_inode_entry *new;
        struct list_head *this;

        if (!S_ISDIR(inode->i_mode))
                return;
retry:
        new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        if (!new) {
                cond_resched();
                goto retry;
        }
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        list_for_each(this, head) {
                struct dir_inode_entry *entry;
                entry = list_entry(this, struct dir_inode_entry, list);
                if (entry->inode == inode) {
                        kmem_cache_free(inode_entry_slab, new);
                        goto out;
                }
        }
        list_add_tail(&new->list, head);
        sbi->n_dirty_dirs++;

        BUG_ON(!S_ISDIR(inode->i_mode));
out:
        inc_page_count(sbi, F2FS_DIRTY_DENTS);
        inode_inc_dirty_dents(inode);
        SetPagePrivate(page);

        spin_unlock(&sbi->dir_inode_lock);
}
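/*
 * Forget a directory inode on dir_inode_list once it no longer has any
 * dirty dentry pages.
 */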
void remove_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
        struct list_head *this;

        if (!S_ISDIR(inode->i_mode))
                return;

        spin_lock(&sbi->dir_inode_lock);
        if (atomic_read(&F2FS_I(inode)->dirty_dents))
                goto out;

        list_for_each(this, head) {
                struct dir_inode_entry *entry;
                entry = list_entry(this, struct dir_inode_entry, list);
                if (entry->inode == inode) {
                        list_del(&entry->list);
                        kmem_cache_free(inode_entry_slab, entry);
                        sbi->n_dirty_dirs--;
                        break;
                }
        }
out:
        spin_unlock(&sbi->dir_inode_lock);
}
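/*
 * Flush the dirty dentry pages of every directory on dir_inode_list,
 * looping until the list drains.
 */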
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
        struct list_head *head = &sbi->dir_inode_list;
        struct dir_inode_entry *entry;
        struct inode *inode;
retry:
        spin_lock(&sbi->dir_inode_lock);
        if (list_empty(head)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
        entry = list_entry(head->next, struct dir_inode_entry, list);
        inode = igrab(entry->inode);
        spin_unlock(&sbi->dir_inode_lock);
        if (inode) {
                filemap_flush(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit the bio, since several dentry pages
                 * under writeback may belong to the inode being freed.
                 */
                f2fs_submit_bio(sbi, DATA, true);
        }
        goto retry;
}
/*
 * Freeze all the FS-operations for checkpoint.
 */
void block_operations(struct f2fs_sb_info *sbi)
{
        int t;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };

        /* Stop renaming operations */
        mutex_lock_op(sbi, RENAME);
        mutex_lock_op(sbi, DENTRY_OPS);

retry_dents:
        /* write all the dirty dentry pages */
        sync_dirty_dir_inodes(sbi);

        mutex_lock_op(sbi, DATA_WRITE);
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                mutex_unlock_op(sbi, DATA_WRITE);
                goto retry_dents;
        }

        /* block all the operations */
        for (t = DATA_NEW; t <= NODE_TRUNC; t++)
                mutex_lock_op(sbi, t);

        mutex_lock(&sbi->write_inode);

        /*
         * POR: we should ensure that there are no dirty node pages
         * until finishing the nat/sit flush.
         */
retry:
        sync_node_pages(sbi, 0, &wbc);

        mutex_lock_op(sbi, NODE_WRITE);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                mutex_unlock_op(sbi, NODE_WRITE);
                goto retry;
        }
        mutex_unlock(&sbi->write_inode);
}
static void unblock_operations(struct f2fs_sb_info *sbi)
{
        int t;
        for (t = NODE_WRITE; t >= RENAME; t--)
                mutex_unlock_op(sbi, t);
}
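/*
 * Write a cp pack at __start_cp_addr(sbi). Its on-disk layout, as written
 * below, is:
 *
 *      checkpoint block
 *      orphan blocks (only if orphans exist)
 *      data segment summaries
 *      node segment summaries (only on umount)
 *      checkpoint block (same content again, closing the pack)
 */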
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        nid_t last_nid = 0;
        block_t start_blk;
        struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
        unsigned int crc32 = 0;
        void *kaddr;
        int i;

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META))
                sync_meta_pages(sbi, META, LONG_MAX);

        next_free_nid(sbi, &last_nid);

        /*
         * modify the checkpoint; the version number has already been
         * updated in write_checkpoint()
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < 3; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                        curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < 3; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                        curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

        ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
        ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
        ckpt->next_free_nid = cpu_to_le32(last_nid);

        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi);
        if (data_sum_blocks < 3)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

        orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
                                        / F2FS_ORPHANS_PER_BLOCK;
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);

        if (is_umount) {
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
                        data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
        } else {
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
                        data_sum_blocks + orphan_blocks);
        }

        if (sbi->n_orphans)
                set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
        *(__le32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_addr(sbi);

        /* write out the checkpoint buffer at block 0 */
        cp_page = grab_meta_page(sbi, start_blk++);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        if (sbi->n_orphans) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
        if (is_umount) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* write out the trailing checkpoint block */
        cp_page = grab_meta_page(sbi, start_blk);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        /* wait for the writeback of previously submitted node/meta pages */
        while (get_pages(sbi, F2FS_WRITEBACK))
                congestion_wait(BLK_RW_ASYNC, HZ / 50);

        filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
        filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;

        /* Here, we have only one bio carrying the CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

        if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
                clear_prefree_segments(sbi);
                F2FS_RESET_SB_DIRT(sbi);
        }
}
/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;

        if (!blocked) {
                mutex_lock(&sbi->cp_mutex);
                block_operations(sbi);
        }

        f2fs_submit_bio(sbi, DATA, true);
        f2fs_submit_bio(sbi, NODE, true);
        f2fs_submit_bio(sbi, META, true);

        /*
         * update the checkpoint pack index:
         * increase the version number so that SIT entries and segment
         * summaries are written to the correct place
         */
        ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to the NAT/SIT area */
        flush_nat_entries(sbi);
        flush_sit_entries(sbi);

        reset_victim_segmap(sbi);

        /* write out the cp pack; fs locks are released in unblock_operations() */
        do_checkpoint(sbi, is_umount);

        unblock_operations(sbi);
        mutex_unlock(&sbi->cp_mutex);
}
void init_orphan_info(struct f2fs_sb_info *sbi)
{
        mutex_init(&sbi->orphan_inode_mutex);
        INIT_LIST_HEAD(&sbi->orphan_inode_list);
        sbi->n_orphans = 0;
}

int __init create_checkpoint_caches(void)
{
        orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
                        sizeof(struct orphan_inode_entry), NULL);
        if (unlikely(!orphan_entry_slab))
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
                        sizeof(struct dir_inode_entry), NULL);
        if (unlikely(!inode_entry_slab)) {
                kmem_cache_destroy(orphan_entry_slab);
                return -ENOMEM;
        }
        return 0;
}

void destroy_checkpoint_caches(void)
{
        kmem_cache_destroy(orphan_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
}