/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *   update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

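/*
 * Reserve one new block for the given dnode: charge the block quota via
 * inc_valid_block_count() and mark the slot with NEW_ADDR so that a real
 * block address is assigned later at write time.
 */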
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

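/*
 * Look up the dnode for @index and reserve a new block there if none is
 * allocated yet.  The dnode is put again unless the caller passed in its
 * own inode page.
 */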
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;
	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);

	if (need_put)
		f2fs_put_dnode(dn);
	return err;
}

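/*
 * Check whether the page at @pgofs is covered by the single in-memory
 * extent cached in the inode.  On a hit, map @bh_result to as many
 * consecutive blocks as the extent allows and return 1; otherwise return
 * 0 so that the caller falls back to a node page lookup.
 */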
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

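/*
 * Record the new block address in the node page, then try to extend the
 * cached extent by a front or back merge, or shrink/split it when the
 * update falls inside it.  An extent that has become too short is dropped
 * and FI_NO_EXTENT disables the cache for this inode.
 */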
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

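/*
 * Return the data page for @index.  If the cached page is not uptodate,
 * the block is read from disk: synchronously when @sync is set, otherwise
 * as readahead (READA).
 */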
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* After fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If this function tries to access a hole, it returns an error.
 * The callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release the operation lock by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

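/*
 * bio completion handler for reads: walk the bio_vec array backwards,
 * mark each page uptodate or failed according to BIO_UPTODATE, and
 * unlock it so that waiters in the read paths above can proceed.
 */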
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * Return an unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	return 0;
}

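/*
 * Flush the read bio that submit_read_page() below has been batching:
 * submit whatever bio is pending on sbi->read_io, under io_mutex.
 */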
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, int rw)
{
	struct f2fs_bio_info *io = &sbi->read_io;

	if (!io->bio)
		return;

	trace_f2fs_submit_read_bio(sbi->sb, rw, META, io->bio);

	mutex_lock(&io->io_mutex);
	if (io->bio) {
		submit_bio(rw, io->bio);
		io->bio = NULL;
	}
	mutex_unlock(&io->io_mutex);
}

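/*
 * Add one page to the shared read bio, merging with the pending bio only
 * when @blk_addr directly follows the last block queued; otherwise the
 * pending bio is submitted first and a new one is allocated.
 */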
void submit_read_page(struct f2fs_sb_info *sbi, struct page *page,
						block_t blk_addr, int rw)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct f2fs_bio_info *io = &sbi->read_io;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (io->bio && io->last_block_in_bio != blk_addr - 1) {
		submit_bio(rw, io->bio);
		io->bio = NULL;
	}
alloc_new:
	if (io->bio == NULL) {
		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		io->bio = f2fs_bio_alloc(bdev, bio_blocks);
		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		io->bio->bi_end_io = read_end_io;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		submit_bio(rw, io->bio);
		io->bio = NULL;
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_read_page(page, rw, META, blk_addr);
}

/*
 * This function should be used by the data read flow only, where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the readahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

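/*
 * Write one dirty data page to disk.  When SSR allocation is in effect
 * and the block is neither newly preallocated nor cold, the page is
 * rewritten in place; otherwise it is written out of place and the extent
 * cache is updated with the new block address.
 */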
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

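/*
 * ->writepage for data pages.  Pages fully beyond i_size are skipped, the
 * tail page is zero-padded, and writes are redirtied with
 * AOP_WRITEPAGE_ACTIVATE while recovery (por_doing) is in progress.
 * Dentry pages are written under checkpoint control; everything else
 * takes f2fs_lock_op() and may trigger f2fs_balance_fs() afterwards.
 */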
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

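/*
 * ->writepages: push the dirty pages through write_cache_pages().
 * nr_to_write is raised to MAX_DESIRED_PAGES_WP to build larger bios, and
 * the surplus is subtracted again before returning.  Non-directory inodes
 * serialize on sbi->writepages.
 */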
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

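/*
 * ->write_begin: reserve the block under f2fs_lock_op() and make the page
 * uptodate before the caller copies user data into it.  Partial writes
 * read the old block first; writes past i_size just zero the untouched
 * parts of the page.
 */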
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};