/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *   update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

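/*
 * Reserve one new data block for the position described by @dn.
 * Returns -EPERM if allocation is disallowed for this inode (FI_NO_ALLOC)
 * and -ENOSPC if no free block is available; on success the block address
 * is recorded as NEW_ADDR until a real block is allocated at write time.
 */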
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

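/*
 * Look up @pgofs in the inode's single in-memory extent and, on a hit,
 * map @bh_result to the cached block address, extending b_size over the
 * remaining contiguous blocks.  Returns 1 on a cache hit, 0 otherwise.
 */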
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

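/*
 * Record @blk_addr for the block addressed by @dn in its node page, then
 * keep the inode's one-extent cache consistent: start a new extent, merge
 * at the front or back, or split the extent around the updated block.
 */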
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

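/*
 * Find the data page for @index.  A page cache hit is returned directly;
 * otherwise the block address is looked up and the page is read either
 * synchronously (READ_SYNC) or as readahead (READA), depending on @sync.
 */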
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, need to be able to tell
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr stays NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release the op lock by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

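/*
 * bio completion callback for reads: walk the bio's pages in reverse,
 * mark each one up-to-date or failed according to BIO_UPTODATE, unlock
 * it, and drop the bio.
 */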
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
						dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

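/*
 * The buffered read path reuses the generic mpage helpers, passing the
 * read-only block mapper above so no block allocation can happen here.
 */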
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

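/*
 * Write out one locked data page: look up its current block address, skip
 * pages that were truncated in the meantime, and either rewrite the block
 * in place (for SSR-friendly updates) or allocate a new block address and
 * refresh the extent cache.
 */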
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

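/*
 * ->writepage implementation for data pages.  Pages entirely beyond i_size
 * are skipped, writes are redirtied while recovery (por_doing) is running,
 * dentry pages are written under checkpoint control, and regular data pages
 * are written under f2fs_lock_op() followed by f2fs_balance_fs().
 */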
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

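/*
 * write_cache_pages() callback: hand each page to ->writepage and record
 * any error on the mapping so writeback callers can see it.
 */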
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

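/*
 * ->writepages implementation: bump small nr_to_write requests up to
 * MAX_DESIRED_PAGES_WP, serialize non-directory writeback through
 * sbi->writepages, and submit the collected DATA bio at the end.
 */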
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

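/*
 * ->write_begin implementation: grab and pin the target page, reserve a
 * block for it under f2fs_lock_op() if none is allocated yet, and bring
 * the page up to date (by zeroing or reading it) unless the whole page is
 * about to be overwritten.
 */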
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
	return err;
}

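/*
 * ->write_end implementation: mark the page dirty, extend i_size when the
 * copied range grows the file, and release the page reference taken in
 * f2fs_write_begin().
 */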
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	unlock_page(page);
	page_cache_release(page);
	return copied;
}

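/*
 * Direct I/O is supported for reads only; direct writes simply return 0 so
 * that the caller falls back to buffered I/O.
 */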
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block_ro);
}

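/*
 * Page invalidation and release hooks: keep the dirty dentry accounting in
 * sync when a directory page is dropped, and clear the page's private flag.
 */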
static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

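/*
 * ->set_page_dirty implementation: mark the page up to date and, on the
 * first dirtying, also call set_dirty_dir_page() so that dirty directory
 * pages are tracked for checkpointing.
 */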
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

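/* Address space operations used for regular file and directory data pages */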
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};