/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *   update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

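/*
 * Reserve a new data block for @dn and stamp it as NEW_ADDR in its node
 * page. Space is charged via inc_valid_block_count() before the address
 * is written, so nothing changes when we bail out with -ENOSPC.
 */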
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

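/*
 * Look up @pgofs in the single in-memory extent cached for this inode.
 * On a hit, map @bh_result to as many contiguous blocks as the extent
 * still covers and return 1; return 0 on a miss.
 */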
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

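/*
 * Record the new block address in the node page, then try to keep the
 * inode's one-extent cache contiguous: grow it by a front or back merge,
 * shrink it on a split, or reset it when the cached extent dies.
 */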
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
	return;
}

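/*
 * Return an uptodate page for @index without locking it. On a cache
 * miss this reads the block synchronously, so the caller only needs a
 * reference, not the page lock.
 */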
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block reserved by fallocate() has NEW_ADDR but no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	unlock_page(page);
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 */
struct page *get_new_data_page(struct inode *inode, pgoff_t index,
						bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, 0);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	f2fs_put_dnode(&dn);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err) {
			f2fs_put_page(page, 1);
			return ERR_PTR(err);
		}
	}
	SetPageUptodate(page);

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}

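/*
 * bio completion callback for reads. The bvec array is walked backwards,
 * prefetching the next page's flags while finishing the current one;
 * every page is marked uptodate (or errored) and unlocked, and the
 * bi_private data attached at allocation time is freed here.
 */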
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * Read operation is synchronous, and caller must unlock the page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	bool sync = (type == READ_SYNC);
	struct bio *bio;

	/* This page may already have been read by other threads */
	if (PageUptodate(page)) {
		if (!sync)
			unlock_page(page);
		return 0;
	}

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);

	/* wait for read completion if sync */
	if (sync) {
		lock_page(page);
		if (PageError(page))
			return -EIO;
	}
	return 0;
}

/*
 * This function should be used by the data read flow only, and it does
 * not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS
 * readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		return 0;

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, RDONLY_NODE);
	if (err)
		return (err == -ENOENT) ? 0 : err;

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
					dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

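/*
 * Write one dirty data page to its final block. Depending on hotness
 * and the SSR policy, this either rewrites the old block in place or
 * allocates a new block and updates the extent cache;
 * f2fs_write_data_page() takes the DATA_WRITE op mutex around this call.
 */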
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, RDONLY_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
		F2FS_I(inode)->data_version =
			le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

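/*
 * ->writepage for data: pages fully beyond i_size are skipped, the page
 * that straddles i_size is zeroed past EOF, and everything else goes
 * through do_write_data_page(). Writes are redirtied and deferred while
 * recovery (por_doing) is in progress.
 */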
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	int err = 0;

	if (page->index < end_index)
		goto out;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto unlock_out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	if (sbi->por_doing)
		goto redirty_out;

	if (wbc->for_reclaim && !S_ISDIR(inode->i_mode) && !is_cold_data(page))
		goto redirty_out;

	mutex_lock_op(sbi, DATA_WRITE);
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	err = do_write_data_page(page);
	if (err && err != -ENOENT) {
		wbc->pages_skipped++;
		set_page_dirty(page);
	}
	mutex_unlock_op(sbi, DATA_WRITE);

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	if (err == -ENOENT)
		goto unlock_out;

	clear_cold_data(page);
	unlock_page(page);

	if (!wbc->for_reclaim && !S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi);
	return 0;

unlock_out:
	unlock_page(page);
	return (err == -ENOENT) ? 0 : err;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

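/*
 * Floor for nr_to_write in ->writepages. Batching at least this many
 * pages per call presumably helps keep the log-structured writes large
 * and sequential instead of trickling small chunks into the log.
 */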
#define MAX_DESIRED_PAGES_WP	4096

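/*
 * ->writepages for data: raise wbc->nr_to_write to the floor above,
 * serialize non-directory writers via sbi->writepages, and flush the
 * pending DATA bio (waiting only for WB_SYNC_ALL) when done.
 */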
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode))
		mutex_lock(&sbi->writepages);
	ret = generic_writepages(mapping, wbc);
	if (!S_ISDIR(inode->i_mode))
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

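/*
 * ->write_begin: reserve (or find) the block backing @pos under the
 * DATA_NEW op mutex, then bring the page uptodate by zeroing or reading,
 * so that write_end only has to copy user data and dirty the page.
 */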
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	mutex_lock_op(sbi, DATA_NEW);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, 0);
	if (err) {
		mutex_unlock_op(sbi, DATA_NEW);
		f2fs_put_page(page, 1);
		return err;
	}

	if (dn.data_blkaddr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, DATA_NEW);
			f2fs_put_page(page, 1);
			return err;
		}
	}
	f2fs_put_dnode(&dn);

	mutex_unlock_op(sbi, DATA_NEW);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		return 0;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}
	}
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

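/*
 * Direct I/O is read-only here: a write returns 0, which makes the VFS
 * fall back to the buffered path, since the cleaner may move blocks
 * underneath an O_DIRECT writer.
 */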
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
	const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block_ro);
}

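/*
 * ->invalidatepage: keep the dirty dentry page accounting consistent
 * when a directory page is thrown away before it gets written back.
 */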
static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

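/*
 * ->releasepage: data pages keep no private data, so only the
 * PagePrivate flag is cleared here.
 */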
static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 0;
}

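/*
 * ->set_page_dirty: besides the generic dirtying, this tracks dirty
 * directory inodes via set_dirty_dir_page() so their dentry pages can
 * be synced at checkpoint time.
 */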
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

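/*
 * Address space operations wired up for data blocks; write_end reuses
 * the generic nobh helper since write_begin already made the page
 * uptodate.
 */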
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
};