/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

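/*
 * Reserve one new data block at dn->ofs_in_node: fail if allocation is
 * forbidden for this inode or no free blocks remain, otherwise charge the
 * valid block count and mark the slot as NEW_ADDR until data is written.
 */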
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

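/*
 * Look up the per-inode single-extent cache.  On a hit, bh_result is mapped
 * to the cached block address for as many blocks as the extent covers and
 * the function returns 1; otherwise it returns 0 and the caller falls back
 * to a node page lookup.
 */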
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
#endif
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

#ifdef CONFIG_F2FS_STAT_FS
	sbi->total_hit_ext++;
#endif
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

#ifdef CONFIG_F2FS_STAT_FS
		sbi->read_hit_ext++;
#endif
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

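/*
 * Record the new block address in the owning node page and keep the single
 * in-memory extent consistent: drop, create, extend (front/back merge), or
 * split it depending on where the updated block falls relative to the
 * cached extent.
 */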
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

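/*
 * Find the data page for @index.  When @sync is false the read is issued
 * as readahead (READA) and the returned page may not yet be uptodate;
 * callers that need valid contents immediately should pass @sync = true.
 */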
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* fallocate() may have reserved the block (NEW_ADDR) without a cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, the functions in dir.c and GC, need to know whether
 * this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but cannot be written, because its
	 * new inode page could not be allocated due to -ENOSPC.
	 * In that case, its blkaddr stays NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a mutex by calling
 * mutex_lock_op() and mutex_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

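	/*
	 * Walk the bio's pages from the last vector back to the first,
	 * prefetching the next page's flags before completing the
	 * current one.
	 */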
	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

/*
 * Fill the locked page with the data located at the given block address.
 * Return an unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only, since it
 * does not check the "create" flag that indicates block allocation.
 * This special behavior exists to exploit the VFS readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

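/*
 * Write one data page: reuse the old on-disk block in place when an
 * in-place update is preferable (SSR), or allocate a new block and
 * refresh the extent cache for the usual out-of-place, log-structured
 * write.
 */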
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do an
	 * in-place write for the updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

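/*
 * Lower bound on the number of pages flushed per writepages pass: if
 * writeback asks for fewer, nr_to_write is temporarily raised to this
 * value and the unused excess is handed back afterwards.
 */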
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

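/*
 * Prepare a page for a buffered write: reserve the on-disk block if the
 * target is still a hole, and read the existing data into the page unless
 * the write covers the whole page or the region lies beyond i_size.
 */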
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

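/*
 * Commit a buffered write: mark the page uptodate and dirty, and push
 * i_size forward when the write extended the file.
 */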
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	unlock_page(page);
	page_cache_release(page);
	return copied;
}

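/*
 * Direct I/O is supported for reads only; returning 0 for a write request
 * makes the VFS fall back to buffered I/O.
 */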
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};