/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

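/*
 * Completion handler for read bios: a page becomes uptodate on success,
 * or is flagged with an error on failure, and is unlocked either way.
 */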
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

/*
 * I/O completion handler for multipage BIOs.
 * copied from fs/mpage.c
 */
static void mpage_end_io(struct bio *bio, int err)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

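/*
 * Completion handler for write bios: an error redirties the page and stops
 * checkpointing via f2fs_stop_checkpoint().  Once no writeback remains,
 * any checkpoint waiter on cp_wait is woken up.
 */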
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

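/* Submit the bio merged in @io so far, if any.  Called with io_rwsem held. */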
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, fio->page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

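/*
 * One contiguous extent per inode is cached in fi->ext.  Look up @pgofs
 * there and return true with *ei filled when it falls inside the extent.
 */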
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}

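/*
 * Fold a new (fofs, blkaddr) mapping into the in-inode extent: initialize,
 * front/back merge, or split it, and drop it entirely once it gets shorter
 * than F2FS_MIN_EXTENT_LEN.  Returns true if the inode page needs syncing.
 */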
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}

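/*
 * The following helpers implement the rb-tree based extent cache (mount
 * option "extent_cache"): each inode owns an extent_tree whose nodes are
 * also strung on the sbi->extent_list LRU for the shrinker.
 */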
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}

static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
							nid_t ino)
{
	struct extent_tree *et;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return NULL;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	return et;
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	return et;
}

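/* Find the extent covering @fofs: try the cached node first, then the tree. */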
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}

static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}

static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}

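/*
 * Insert @ei, merging with a neighboring extent when they are contiguous;
 * a node emptied by such a merge is handed back through @den to be freed.
 */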
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}

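/*
 * Free the nodes of one extent tree: all of them if @free_all, otherwise
 * only those already taken off the global LRU list.  Returns the number
 * of nodes released.
 */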
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

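/* Seed the extent tree with the largest extent recorded in the on-disk inode. */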
static void f2fs_init_extent_tree(struct inode *inode,
						struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);
	if (et->count)
		goto out;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		et->cached_en = en;

		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		return false;

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}

static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 are split from en; they become smaller and smaller
	 * fragments after several splits.  So if their length is smaller
	 * than F2FS_MIN_EXTENT_LEN, we do not insert them into the extent
	 * tree in the first place.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

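/*
 * Copy the most useful extent (the cached node, or else a tree boundary)
 * back into fi->ext so it can be written into the on-disk inode before
 * the extent tree goes away.
 */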
void f2fs_preserve_extent_tree(struct inode *inode)
{
	struct extent_tree *et;
	struct extent_info *ext = &F2FS_I(inode)->ext;
	bool sync = false;

	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return;

	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
	if (!et) {
		if (ext->len) {
			ext->len = 0;
			update_inode_page(inode);
		}
		return;
	}

	read_lock(&et->lock);
	if (et->count) {
		struct extent_node *en;

		if (et->cached_en) {
			en = et->cached_en;
		} else {
			struct rb_node *node = rb_first(&et->root);

			if (!node)
				node = rb_last(&et->root);
			en = rb_entry(node, struct extent_node, rb_node);
		}

		if (__is_extent_same(ext, &en->ei))
			goto out;

		*ext = en->ei;
		sync = true;
	} else if (ext->len) {
		ext->len = 0;
		sync = true;
	}
out:
	read_unlock(&et->lock);
	atomic_dec(&et->refcount);

	if (sync)
		update_inode_page(inode);
}

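/*
 * Shrinker entry: under memory pressure, unhook up to @nr_shrink nodes
 * from the LRU list, free the unhooked nodes in every tree, and finally
 * delete trees that ended up empty and unreferenced.
 */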
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		goto out;

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete the extent tree entry in the radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
	return;
}

void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
{
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		f2fs_init_extent_tree(inode, i_ext);

	write_lock(&F2FS_I(inode)->ext_lock);
	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
	write_unlock(&F2FS_I(inode)->ext_lock);
}

static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}

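/*
 * Grab the data page at @index and get its read going: uptodate and
 * NEW_ADDR pages come back unlocked immediately, otherwise a read bio is
 * issued with @rw and the page is returned locked with IO in flight.
 */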
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
	};

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		return ERR_PTR(err);
	return page;
}

982{
983 struct address_space *mapping = inode->i_mapping;
984 struct page *page;
985
986 page = find_get_page(mapping, index);
987 if (page && PageUptodate(page))
988 return page;
989 f2fs_put_page(page, 0);
990
991 page = get_read_data_page(inode, index, READ_SYNC);
992 if (IS_ERR(page))
993 return page;
994
995 if (PageUptodate(page))
996 return page;
997
998 wait_on_page_locked(page);
999 if (unlikely(!PageUptodate(page))) {
1000 f2fs_put_page(page, 0);
1001 return ERR_PTR(-EIO);
1002 }
1003 return page;
1004}
1005
1006/*
1007 * If it tries to access a hole, return an error.
1008 * Because, the callers, functions in dir.c and GC, should be able to know
1009 * whether this page exists or not.
1010 */
1011struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
1012{
1013 struct address_space *mapping = inode->i_mapping;
1014 struct page *page;
1015repeat:
1016 page = get_read_data_page(inode, index, READ_SYNC);
1017 if (IS_ERR(page))
1018 return page;
1019
1020 /* wait for read completion */
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001021 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001022 if (unlikely(!PageUptodate(page))) {
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001023 f2fs_put_page(page, 1);
1024 return ERR_PTR(-EIO);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001025 }
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001026 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001027 f2fs_put_page(page, 1);
1028 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001029 }
1030 return page;
1031}
1032
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001033/*
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001034 * Caller ensures that this data page is never allocated.
1035 * A new zero-filled data page is allocated in the page cache.
Jaegeuk Kim39936832012-11-22 16:21:29 +09001036 *
Chao Yu4f4124d2013-12-21 18:02:14 +08001037 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1038 * f2fs_unlock_op().
Jaegeuk Kima8865372013-12-27 17:04:17 +09001039 * Note that, ipage is set only by make_empty_dir.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001040 */
Jaegeuk Kim64aa7ed2013-05-20 09:55:50 +09001041struct page *get_new_data_page(struct inode *inode,
Jaegeuk Kima8865372013-12-27 17:04:17 +09001042 struct page *ipage, pgoff_t index, bool new_i_size)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001043{
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001044 struct address_space *mapping = inode->i_mapping;
1045 struct page *page;
1046 struct dnode_of_data dn;
1047 int err;
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001048repeat:
1049 page = grab_cache_page(mapping, index);
1050 if (!page)
1051 return ERR_PTR(-ENOMEM);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001052
Jaegeuk Kima8865372013-12-27 17:04:17 +09001053 set_new_dnode(&dn, inode, ipage, NULL, 0);
Huajun Lib6009652013-11-10 23:13:18 +08001054 err = f2fs_reserve_block(&dn, index);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001055 if (err) {
1056 f2fs_put_page(page, 1);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001057 return ERR_PTR(err);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001058 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001059 if (!ipage)
1060 f2fs_put_dnode(&dn);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001061
1062 if (PageUptodate(page))
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001063 goto got_it;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001064
1065 if (dn.data_blkaddr == NEW_ADDR) {
1066 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001067 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001068 } else {
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001069 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001070 .sbi = F2FS_I_SB(inode),
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001071 .type = DATA,
1072 .rw = READ_SYNC,
1073 .blk_addr = dn.data_blkaddr,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001074 .page = page,
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001075 };
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001076 err = f2fs_submit_page_bio(&fio);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001077 if (err)
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001078 return ERR_PTR(err);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001079
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001080 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001081 if (unlikely(!PageUptodate(page))) {
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001082 f2fs_put_page(page, 1);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001083 return ERR_PTR(-EIO);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001084 }
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001085 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001086 f2fs_put_page(page, 1);
1087 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001088 }
1089 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001090got_it:
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001091 if (new_i_size &&
1092 i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
1093 i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
Jaegeuk Kim699489b2013-06-07 22:08:23 +09001094 /* Only the directory inode sets new_i_size */
1095 set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001096 }
1097 return page;
1098}
1099
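/*
 * Allocate an on-disk block for the offset in @dn, bypassing the extent
 * cache (direct IO path), and extend i_size to cover the new block.
 */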
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}

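/*
 * Preallocate blocks for a direct write of @count bytes at @offset, one
 * dnode page at a time, so that the direct IO path sees mapped blocks.
 */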
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, bool fiemap)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	map->m_len = 1;
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

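/*
 * Adapt the buffer_head based get_block_t interface to f2fs_map_blocks():
 * translate the mapping result into bh state bits, block number and size.
 */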
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001312static int __get_data_block(struct inode *inode, sector_t iblock,
1313 struct buffer_head *bh, int create, bool fiemap)
1314{
1315 struct f2fs_map_blocks map;
1316 int ret;
1317
1318 map.m_lblk = iblock;
1319 map.m_len = bh->b_size >> inode->i_blkbits;
1320
1321 ret = f2fs_map_blocks(inode, &map, create, fiemap);
1322 if (!ret) {
1323 map_bh(bh, inode->i_sb, map.m_pblk);
1324 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1325 bh->b_size = map.m_len << inode->i_blkbits;
1326 }
1327 return ret;
1328}
1329
Jaegeuk Kimccfb3002014-06-13 13:02:11 +09001330static int get_data_block(struct inode *inode, sector_t iblock,
1331 struct buffer_head *bh_result, int create)
1332{
1333 return __get_data_block(inode, iblock, bh_result, create, false);
1334}
1335
1336static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
1337 struct buffer_head *bh_result, int create)
1338{
1339 return __get_data_block(inode, iblock, bh_result, create, true);
1340}
1341
Jaegeuk Kim9ab70132014-06-08 04:30:14 +09001342int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1343 u64 start, u64 len)
1344{
Jaegeuk Kimccfb3002014-06-13 13:02:11 +09001345 return generic_block_fiemap(inode, fieinfo,
1346 start, len, get_data_block_fiemap);
Jaegeuk Kim9ab70132014-06-08 04:30:14 +09001347}
1348
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001349/*
1350 * This function was originally taken from fs/mpage.c, and customized for f2fs.
1351 * Major change was from block_size == page_size in f2fs by default.
1352 */
1353static int f2fs_mpage_readpages(struct address_space *mapping,
1354 struct list_head *pages, struct page *page,
1355 unsigned nr_pages)
1356{
1357 struct bio *bio = NULL;
1358 unsigned page_idx;
1359 sector_t last_block_in_bio = 0;
1360 struct inode *inode = mapping->host;
1361 const unsigned blkbits = inode->i_blkbits;
1362 const unsigned blocksize = 1 << blkbits;
1363 sector_t block_in_file;
1364 sector_t last_block;
1365 sector_t last_block_in_file;
1366 sector_t block_nr;
1367 struct block_device *bdev = inode->i_sb->s_bdev;
1368 struct f2fs_map_blocks map;
1369
1370 map.m_pblk = 0;
1371 map.m_lblk = 0;
1372 map.m_len = 0;
1373 map.m_flags = 0;
1374
1375 for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
1376
1377 prefetchw(&page->flags);
1378 if (pages) {
1379 page = list_entry(pages->prev, struct page, lru);
1380 list_del(&page->lru);
1381 if (add_to_page_cache_lru(page, mapping,
1382 page->index, GFP_KERNEL))
1383 goto next_page;
1384 }
1385
1386 block_in_file = (sector_t)page->index;
1387 last_block = block_in_file + nr_pages;
1388 last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
1389 blkbits;
1390 if (last_block > last_block_in_file)
1391 last_block = last_block_in_file;
1392
1393 /*
1394 * Map blocks using the previous result first.
1395 */
1396 if ((map.m_flags & F2FS_MAP_MAPPED) &&
1397 block_in_file > map.m_lblk &&
1398 block_in_file < (map.m_lblk + map.m_len))
1399 goto got_it;
1400
1401 /*
1402 * Then do more f2fs_map_blocks() calls until we are
1403 * done with this page.
1404 */
1405 map.m_flags = 0;
1406
1407 if (block_in_file < last_block) {
1408 map.m_lblk = block_in_file;
1409 map.m_len = last_block - block_in_file;
1410
1411 if (f2fs_map_blocks(inode, &map, 0, false))
1412 goto set_error_page;
1413 }
1414got_it:
1415 if ((map.m_flags & F2FS_MAP_MAPPED)) {
1416 block_nr = map.m_pblk + block_in_file - map.m_lblk;
1417 SetPageMappedToDisk(page);
1418
1419 if (!PageUptodate(page) && !cleancache_get_page(page)) {
1420 SetPageUptodate(page);
1421 goto confused;
1422 }
1423 } else {
1424 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1425 SetPageUptodate(page);
1426 unlock_page(page);
1427 goto next_page;
1428 }
1429
1430 /*
1431 * This page will go to BIO. Do we need to send this
1432 * BIO off first?
1433 */
1434 if (bio && (last_block_in_bio != block_nr - 1)) {
1435submit_and_realloc:
1436 submit_bio(READ, bio);
1437 bio = NULL;
1438 }
1439 if (bio == NULL) {
1440 bio = bio_alloc(GFP_KERNEL,
1441 min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
1442 if (!bio)
1443 goto set_error_page;
1444 bio->bi_bdev = bdev;
1445 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
1446 bio->bi_end_io = mpage_end_io;
1447 bio->bi_private = NULL;
1448 }
1449
1450 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
1451 goto submit_and_realloc;
1452
1453 last_block_in_bio = block_nr;
1454 goto next_page;
1455set_error_page:
1456 SetPageError(page);
1457 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1458 unlock_page(page);
1459 goto next_page;
1460confused:
1461 if (bio) {
1462 submit_bio(READ, bio);
1463 bio = NULL;
1464 }
1465 unlock_page(page);
1466next_page:
1467 if (pages)
1468 page_cache_release(page);
1469 }
1470 BUG_ON(pages && !list_empty(pages));
1471 if (bio)
1472 submit_bio(READ, bio);
1473 return 0;
1474}
1475
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

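/*
 * Write one data page either in place (IPU: rewrite the existing block
 * address) or out of place (OPU: allocate a new block, then update the
 * dnode and the extent cache to point at it).
 */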
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	set_page_writeback(page);

	/*
	 * If the current allocation would need SSR, it is better to do an
	 * in-place write for the updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

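/*
 * ->writepage() entry. Pages that cannot be written right now are
 * redirtied, and AOP_WRITEPAGE_ACTIVATE is returned so that writeback
 * retries them later.
 */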
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

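	/*
	 * Serialize regular-file writepages with sbi->writepages so that
	 * concurrent writers do not interleave their block allocations;
	 * this helps keep each file's data blocks contiguous on disk.
	 */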
	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

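/*
 * A failed write may already have reserved blocks and extended the page
 * cache beyond the old EOF; trim both back to i_size.
 */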
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

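	/*
	 * A full-page overwrite (or an already-uptodate page) needs no
	 * read-modify-write; otherwise bring the rest of the page uptodate
	 * below, either by zeroing it or by reading the old block.
	 */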
	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

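/*
 * Direct-I/O writes must be block-aligned. On an alignment failure
 * f2fs_direct_IO() returns 0, which makes the VFS fall back to
 * buffered I/O.
 */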
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (iov_iter_rw(iter) == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* direct I/O cannot work on inline data, so convert it first */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

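	/*
	 * Preallocate the blocks covered by a direct write up front, so
	 * that get_data_block() only has to look mappings up, not
	 * allocate, while blockdev_direct_IO() is in flight.
	 */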
	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep its PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

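	/*
	 * Pages of an atomic-write file are only registered in memory
	 * here (see register_inmem_page()); they reach disk when the
	 * atomic write is committed.
	 */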
	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* bmap cannot work on inline data, so convert it first */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage = f2fs_read_data_page,
	.readpages = f2fs_read_data_pages,
	.writepage = f2fs_write_data_page,
	.writepages = f2fs_write_data_pages,
	.write_begin = f2fs_write_begin,
	.write_end = f2fs_write_end,
	.set_page_dirty = f2fs_set_data_page_dirty,
	.invalidatepage = f2fs_invalidate_page,
	.releasepage = f2fs_release_page,
	.direct_IO = f2fs_direct_IO,
	.bmap = f2fs_bmap,
};