/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

/*
 * I/O completion handler for multipage BIOs.
 * copied from fs/mpage.c
 */
static void mpage_end_io(struct bio *bio, int err)
{
	struct bio_vec *bv;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (err) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

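/*
 * Write-side completion handler: release any crypto control pages, and
 * on I/O error re-dirty the page, flag the mapping with AS_EIO and stop
 * checkpointing.  Once the last writeback page drains, wake up a waiter
 * blocked in the checkpoint path (sbi->cp_wait).
 */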
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

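/*
 * Flush the bio currently being merged in @io, tracing it as a read or
 * write submission; io->bio is reset so that a new merge window starts.
 */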
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

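/*
 * Merged submission path: the page is appended to the in-flight bio of
 * its page type as long as it is physically contiguous with the last
 * block (io->last_block_in_bio + 1) and carries the same rw flags;
 * otherwise the cached bio is flushed first and a new one is allocated.
 */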
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

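/*
 * Single-extent cache lookup: each inode caches one contiguous extent
 * in fi->ext under fi->ext_lock, and a lookup hits only when @pgofs
 * falls inside [ext.fofs, ext.fofs + ext.len - 1].
 */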
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}

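/*
 * Keep fi->ext in sync with a new (fofs, blkaddr) mapping: extend it by
 * front/back merge when the new block is adjacent, split it when the
 * update lands inside it, and drop it (setting FI_NO_EXTENT) once it
 * shrinks below F2FS_MIN_EXTENT_LEN.  Returns true when the inode page
 * needs to be synced with the new extent.
 */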
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}

static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
							nid_t ino)
{
	struct extent_tree *et;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return NULL;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	return et;
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	return et;
}

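/*
 * Rb-tree lookup keyed by file offset.  et->cached_en remembers the
 * last node that was hit, so repeated lookups within the same extent
 * can skip the tree walk entirely.
 */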
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}

static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}

static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}

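/*
 * Insert @ei into the tree, merging it into an adjacent node whenever
 * possible.  When a merge makes the grown node touch its rb-tree
 * neighbour as well, the neighbour is detached and handed back via
 * @den so the caller can unlink it from the global LRU and free it.
 */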
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

static void f2fs_init_extent_tree(struct inode *inode,
						struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);
	if (et->count)
		goto out;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		et->cached_en = en;

		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		return false;

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}

static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
					fofs - dei.fofs + dei.blk, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 were split from en; they become smaller and smaller
	 * fragments after several splits, so if the length is smaller than
	 * F2FS_MIN_EXTENT_LEN, we will not add them into the extent tree.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

void f2fs_preserve_extent_tree(struct inode *inode)
{
	struct extent_tree *et;
	struct extent_info *ext = &F2FS_I(inode)->ext;
	bool sync = false;

	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return;

	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
	if (!et) {
		if (ext->len) {
			ext->len = 0;
			update_inode_page(inode);
		}
		return;
	}

	read_lock(&et->lock);
	if (et->count) {
		struct extent_node *en;

		if (et->cached_en) {
			en = et->cached_en;
		} else {
			struct rb_node *node = rb_first(&et->root);

			if (!node)
				node = rb_last(&et->root);
			en = rb_entry(node, struct extent_node, rb_node);
		}

		if (__is_extent_same(ext, &en->ei))
			goto out;

		*ext = en->ei;
		sync = true;
	} else if (ext->len) {
		ext->len = 0;
		sync = true;
	}
out:
	read_unlock(&et->lock);
	atomic_dec(&et->refcount);

	if (sync)
		update_inode_page(inode);
}

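/*
 * Shrinker entry for the extent cache: detach up to @nr_shrink nodes
 * from the global LRU, free the detached nodes tree by tree, and
 * finally delete any extent tree that is now empty and unreferenced.
 */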
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		goto out;

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete the extent tree entry in the radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
	return;
}

void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
{
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		f2fs_init_extent_tree(inode, i_ext);

	write_lock(&F2FS_I(inode)->ext_lock);
	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
	write_unlock(&F2FS_I(inode)->ext_lock);
}

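/*
 * Front-end lookup/update helpers: dispatch to the rb-tree extent
 * cache when the filesystem is mounted with the extent_cache option,
 * and fall back to the single in-inode extent otherwise.
 */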
static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}

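/*
 * Read one data page: consult the extent cache first, then walk the
 * dnode for the block address.  A NEW_ADDR block (reserved but never
 * written) is served as a zero-filled page without touching the disk,
 * and encrypted regular files go through the generic page cache path.
 */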
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		return ERR_PTR(err);
	return page;
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Return an error when a hole is accessed, because the callers
 * (functions in dir.c and GC) need to know whether this page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC);
		if (IS_ERR(page))
			goto repeat;

		/* wait for read completion */
		lock_page(page);
	}
got_it:
	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

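/*
 * Allocate an on-disk block for a direct-IO style write.  The new
 * address is stored straight into the node page (the extent cache is
 * intentionally bypassed here), and i_size is advanced if the block
 * lies beyond the current end of file.
 */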
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, bool fiemap)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
		if (dn.data_blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	map->m_len = 1;
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
				(map->m_pblk == NEW_ADDR &&
				blkaddr == NEW_ADDR)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

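/*
 * Thin buffer_head adapter: translate a get_block_t style request into
 * an f2fs_map_blocks() call and copy the result back into the bh.
 */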
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, bool fiemap)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, fiemap);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001466/*
1467 * This function was originally taken from fs/mpage.c, and customized for f2fs.
1468 * Major change was from block_size == page_size in f2fs by default.
1469 */
1470static int f2fs_mpage_readpages(struct address_space *mapping,
1471 struct list_head *pages, struct page *page,
1472 unsigned nr_pages)
1473{
1474 struct bio *bio = NULL;
1475 unsigned page_idx;
1476 sector_t last_block_in_bio = 0;
1477 struct inode *inode = mapping->host;
1478 const unsigned blkbits = inode->i_blkbits;
1479 const unsigned blocksize = 1 << blkbits;
1480 sector_t block_in_file;
1481 sector_t last_block;
1482 sector_t last_block_in_file;
1483 sector_t block_nr;
1484 struct block_device *bdev = inode->i_sb->s_bdev;
1485 struct f2fs_map_blocks map;
1486
1487 map.m_pblk = 0;
1488 map.m_lblk = 0;
1489 map.m_len = 0;
1490 map.m_flags = 0;
1491
1492 for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
1493
1494 if (pages) {
1495 page = list_entry(pages->prev, struct page, lru);
1496 prefetchw(&page->flags);
1497 list_del(&page->lru);
1498 if (add_to_page_cache_lru(page, mapping,
1499 page->index, GFP_KERNEL))
1500 goto next_page;
1501 }
1502
1503 block_in_file = (sector_t)page->index;
1504 last_block = block_in_file + nr_pages;
1505 last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
1506 blkbits;
1507 if (last_block > last_block_in_file)
1508 last_block = last_block_in_file;
1509
1510 /*
1511 * Map blocks using the previous result first.
1512 */
1513 if ((map.m_flags & F2FS_MAP_MAPPED) &&
1514 block_in_file > map.m_lblk &&
1515 block_in_file < (map.m_lblk + map.m_len))
1516 goto got_it;
1517
1518 /*
1519 * Then do more f2fs_map_blocks() calls until we are
1520 * done with this page.
1521 */
1522 map.m_flags = 0;
1523
1524 if (block_in_file < last_block) {
1525 map.m_lblk = block_in_file;
1526 map.m_len = last_block - block_in_file;
1527
1528 if (f2fs_map_blocks(inode, &map, 0, false))
1529 goto set_error_page;
1530 }
1531got_it:
1532 if ((map.m_flags & F2FS_MAP_MAPPED)) {
1533 block_nr = map.m_pblk + block_in_file - map.m_lblk;
1534 SetPageMappedToDisk(page);
1535
1536 if (!PageUptodate(page) && !cleancache_get_page(page)) {
1537 SetPageUptodate(page);
1538 goto confused;
1539 }
1540 } else {
1541 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1542 SetPageUptodate(page);
1543 unlock_page(page);
1544 goto next_page;
1545 }
1546
1547 /*
1548 * This page will go to BIO. Do we need to send this
1549 * BIO off first?
1550 */
1551 if (bio && (last_block_in_bio != block_nr - 1)) {
1552submit_and_realloc:
1553 submit_bio(READ, bio);
1554 bio = NULL;
1555 }
1556 if (bio == NULL) {
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001557 struct f2fs_crypto_ctx *ctx = NULL;
1558
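			/*
			 * Ciphertext is read as-is here; mpage_end_io()
			 * hands the bio to f2fs_end_io_crypto_work() for
			 * decryption via this crypto context.
			 */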
1559 if (f2fs_encrypted_inode(inode) &&
1560 S_ISREG(inode->i_mode)) {
1561 struct page *cpage;
1562
1563 ctx = f2fs_get_crypto_ctx(inode);
1564 if (IS_ERR(ctx))
1565 goto set_error_page;
1566
1567 /* wait for the page to be moved by cleaning (GC) */
1568 cpage = find_lock_page(
1569 META_MAPPING(F2FS_I_SB(inode)),
1570 block_nr);
1571 if (cpage) {
1572 f2fs_wait_on_page_writeback(cpage,
1573 DATA);
1574 f2fs_put_page(cpage, 1);
1575 }
1576 }
1577
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001578 bio = bio_alloc(GFP_KERNEL,
1579 min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001580 if (!bio) {
1581 if (ctx)
1582 f2fs_release_crypto_ctx(ctx);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001583 goto set_error_page;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001584 }
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001585 bio->bi_bdev = bdev;
1586 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
1587 bio->bi_end_io = mpage_end_io;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001588 bio->bi_private = ctx;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001589 }
1590
1591 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
1592 goto submit_and_realloc;
1593
1594 last_block_in_bio = block_nr;
1595 goto next_page;
1596set_error_page:
1597 SetPageError(page);
1598 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1599 unlock_page(page);
1600 goto next_page;
1601confused:
1602 if (bio) {
1603 submit_bio(READ, bio);
1604 bio = NULL;
1605 }
1606 unlock_page(page);
1607next_page:
1608 if (pages)
1609 page_cache_release(page);
1610 }
1611 BUG_ON(pages && !list_empty(pages));
1612 if (bio)
1613 submit_bio(READ, bio);
1614 return 0;
1615}
1616
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001617static int f2fs_read_data_page(struct file *file, struct page *page)
1618{
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001619 struct inode *inode = page->mapping->host;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001620 int ret = -EAGAIN;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001621
Chao Yuc20e89c2014-05-06 16:53:08 +08001622 trace_f2fs_readpage(page, DATA);
1623
arter97e1c42042014-08-06 23:22:50 +09001624 /* If the file has inline data, try to read it directly */
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001625 if (f2fs_has_inline_data(inode))
1626 ret = f2fs_read_inline_data(inode, page);
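	/* -EAGAIN: not served inline, fall back to the mpage read path */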
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001627 if (ret == -EAGAIN)
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001628 ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001629 return ret;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001630}
1631
1632static int f2fs_read_data_pages(struct file *file,
1633 struct address_space *mapping,
1634 struct list_head *pages, unsigned nr_pages)
1635{
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001636 struct inode *inode = file->f_mapping->host;
1637
1638 /* If the file has inline data, skip readpages */
1639 if (f2fs_has_inline_data(inode))
1640 return 0;
1641
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07001642 return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001643}
1644
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001645int do_write_data_page(struct f2fs_io_info *fio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001646{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001647 struct page *page = fio->page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001648 struct inode *inode = page->mapping->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001649 struct dnode_of_data dn;
1650 int err = 0;
1651
1652 set_new_dnode(&dn, inode, NULL, NULL, 0);
Jaegeuk Kim266e97a2013-02-26 13:10:46 +09001653 err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001654 if (err)
1655 return err;
1656
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001657 fio->blk_addr = dn.data_blkaddr;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001658
1659 /* This page is already truncated */
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001660 if (fio->blk_addr == NULL_ADDR) {
1661 ClearPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001662 goto out_writepage;
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001663 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001664
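	/* regular-file data is encrypted into a separate page before writeback */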
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001665 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1666 fio->encrypted_page = f2fs_encrypt(inode, fio->page);
1667 if (IS_ERR(fio->encrypted_page)) {
1668 err = PTR_ERR(fio->encrypted_page);
1669 goto out_writepage;
1670 }
1671 }
1672
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001673 set_page_writeback(page);
1674
1675 /*
1676 * If current allocation needs SSR,
1677 * it had better do in-place writes for updated data.
1678 */
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001679 if (unlikely(fio->blk_addr != NEW_ADDR &&
Haicheng Lib25958b2013-06-13 16:59:29 +08001680 !is_cold_data(page) &&
1681 need_inplace_update(inode))) {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001682 rewrite_data_page(fio);
Jaegeuk Kimfff04f92014-07-25 07:40:59 -07001683 set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
Jaegeuk Kim8ce67cb2015-03-17 17:58:08 -07001684 trace_f2fs_do_write_data_page(page, IPU);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001685 } else {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001686 write_data_page(&dn, fio);
Chao Yu216a6202015-03-19 19:23:32 +08001687 set_data_blkaddr(&dn);
Chao Yu7e4dde72015-02-05 17:51:34 +08001688 f2fs_update_extent_cache(&dn);
Jaegeuk Kim8ce67cb2015-03-17 17:58:08 -07001689 trace_f2fs_do_write_data_page(page, OPU);
Jaegeuk Kimfff04f92014-07-25 07:40:59 -07001690 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
Jaegeuk Kim3c6c2be2015-03-17 17:16:35 -07001691 if (page->index == 0)
1692 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001693 }
1694out_writepage:
1695 f2fs_put_dnode(&dn);
1696 return err;
1697}
1698
1699static int f2fs_write_data_page(struct page *page,
1700 struct writeback_control *wbc)
1701{
1702 struct inode *inode = page->mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001703 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001704 loff_t i_size = i_size_read(inode);
1705 const pgoff_t end_index = ((unsigned long long) i_size)
1706 >> PAGE_CACHE_SHIFT;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001707 unsigned offset = 0;
Jaegeuk Kim39936832012-11-22 16:21:29 +09001708 bool need_balance_fs = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001709 int err = 0;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001710 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001711 .sbi = sbi,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001712 .type = DATA,
Chris Fries6c311ec2014-01-17 14:44:39 -06001713 .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001714 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001715 .encrypted_page = NULL,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001716 };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001717
Chao Yuecda0de2014-05-06 16:48:26 +08001718 trace_f2fs_writepage(page, DATA);
1719
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001720 if (page->index < end_index)
Jaegeuk Kim39936832012-11-22 16:21:29 +09001721 goto write;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001722
1723 /*
1724 * If the offset is out-of-range of file size,
1725 * this page does not have to be written to disk.
1726 */
1727 offset = i_size & (PAGE_CACHE_SIZE - 1);
Jaegeuk Kim76f60262014-04-15 16:04:15 +09001728 if ((page->index >= end_index + 1) || !offset)
Jaegeuk Kim39936832012-11-22 16:21:29 +09001729 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001730
1731 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001732write:
Chao Yucaf00472015-01-28 17:48:42 +08001733 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001734 goto redirty_out;
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001735 if (f2fs_is_drop_cache(inode))
1736 goto out;
1737 if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
1738 available_free_memory(sbi, BASE_CHECK))
1739 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001740
Jaegeuk Kim39936832012-11-22 16:21:29 +09001741 /* Dentry blocks are controlled by checkpoint */
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001742 if (S_ISDIR(inode->i_mode)) {
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001743 if (unlikely(f2fs_cp_error(sbi)))
1744 goto redirty_out;
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001745 err = do_write_data_page(&fio);
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001746 goto done;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001747 }
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001748
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001749 /* we should bypass data pages to let the kworker jobs proceed */
1750 if (unlikely(f2fs_cp_error(sbi))) {
1751 SetPageError(page);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001752 goto out;
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001753 }
1754
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001755 if (!wbc->for_reclaim)
1756 need_balance_fs = true;
1757 else if (has_not_enough_free_secs(sbi, 0))
Jaegeuk Kim39936832012-11-22 16:21:29 +09001758 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001759
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001760 err = -EAGAIN;
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001761 f2fs_lock_op(sbi);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001762 if (f2fs_has_inline_data(inode))
1763 err = f2fs_write_inline_data(inode, page);
1764 if (err == -EAGAIN)
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001765 err = do_write_data_page(&fio);
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001766 f2fs_unlock_op(sbi);
1767done:
1768 if (err && err != -ENOENT)
1769 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001770
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001771 clear_cold_data(page);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001772out:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001773 inode_dec_dirty_pages(inode);
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001774 if (err)
1775 ClearPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001776 unlock_page(page);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001777 if (need_balance_fs)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001778 f2fs_balance_fs(sbi);
Jaegeuk Kim2aea39e2014-04-24 09:49:52 +09001779 if (wbc->for_reclaim)
1780 f2fs_submit_merged_bio(sbi, DATA, WRITE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001781 return 0;
1782
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001783redirty_out:
Jaegeuk Kim76f60262014-04-15 16:04:15 +09001784 redirty_page_for_writepage(wbc, page);
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001785 return AOP_WRITEPAGE_ACTIVATE;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001786}
1787
Namjae Jeonfa9150a2013-01-15 16:45:24 +09001788static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
1789 void *data)
1790{
1791 struct address_space *mapping = data;
1792 int ret = mapping->a_ops->writepage(page, wbc);
1793 mapping_set_error(mapping, ret);
1794 return ret;
1795}
1796
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001797static int f2fs_write_data_pages(struct address_space *mapping,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001798 struct writeback_control *wbc)
1799{
1800 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001801 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim5463e7c2015-04-21 10:40:54 -07001802 bool locked = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001803 int ret;
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001804 long diff;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001805
Chao Yue5748432014-05-06 16:51:24 +08001806 trace_f2fs_writepages(mapping->host, wbc, DATA);
1807
P J Pcfb185a2013-04-03 11:38:00 +09001808 /* deal with chardevs and other special files */
1809 if (!mapping->a_ops->writepage)
1810 return 0;
1811
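	/*
	 * Defer small WB_SYNC_NONE directory writeback while dirty dentry
	 * pages are few and memory is not tight.
	 */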
Jaegeuk Kim87d6f892014-03-18 12:40:49 +09001812 if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001813 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
Jaegeuk Kim6fb03f32014-04-16 10:47:06 +09001814 available_free_memory(sbi, DIRTY_DENTS))
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001815 goto skip_write;
Jaegeuk Kim87d6f892014-03-18 12:40:49 +09001816
Jaegeuk Kimd5669f72015-02-27 13:37:39 -08001817 /* during POR (power-off recovery), we don't need to trigger writepage at all. */
1818 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1819 goto skip_write;
1820
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001821 diff = nr_pages_to_write(sbi, DATA, wbc);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001822
Jaegeuk Kim5463e7c2015-04-21 10:40:54 -07001823 if (!S_ISDIR(inode->i_mode)) {
1824 mutex_lock(&sbi->writepages);
1825 locked = true;
1826 }
Namjae Jeonfa9150a2013-01-15 16:45:24 +09001827 ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
Jaegeuk Kim5463e7c2015-04-21 10:40:54 -07001828 if (locked)
1829 mutex_unlock(&sbi->writepages);
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001830
1831 f2fs_submit_merged_bio(sbi, DATA, WRITE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001832
1833 remove_dirty_dir_inode(inode);
1834
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001835 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001836 return ret;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001837
1838skip_write:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001839 wbc->pages_skipped += get_dirty_pages(inode);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001840 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001841}
1842
Chao Yu3aab8f82014-07-02 13:25:04 +08001843static void f2fs_write_failed(struct address_space *mapping, loff_t to)
1844{
1845 struct inode *inode = mapping->host;
1846
1847 if (to > inode->i_size) {
1848 truncate_pagecache(inode, inode->i_size);
Jaegeuk Kim764aa3e2014-08-14 16:32:54 -07001849 truncate_blocks(inode, inode->i_size, true);
Chao Yu3aab8f82014-07-02 13:25:04 +08001850 }
1851}
1852
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001853static int f2fs_write_begin(struct file *file, struct address_space *mapping,
1854 loff_t pos, unsigned len, unsigned flags,
1855 struct page **pagep, void **fsdata)
1856{
1857 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001858 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001859 struct page *page, *ipage;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001860 pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
1861 struct dnode_of_data dn;
1862 int err = 0;
1863
Chao Yu62aed042014-05-06 16:46:04 +08001864 trace_f2fs_write_begin(inode, pos, len, flags);
1865
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001866 f2fs_balance_fs(sbi);
Jaegeuk Kim5f727392014-11-25 10:59:45 -08001867
1868 /*
1869 * We should check this at this moment to avoid deadlock on inode page
1870 * and #0 page. The locking rule for inline_data conversion should be:
1871 * lock_page(page #0) -> lock_page(inode_page)
1872 */
1873 if (index != 0) {
1874 err = f2fs_convert_inline_inode(inode);
1875 if (err)
1876 goto fail;
1877 }
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001878repeat:
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001879 page = grab_cache_page_write_begin(mapping, index, flags);
Chao Yu3aab8f82014-07-02 13:25:04 +08001880 if (!page) {
1881 err = -ENOMEM;
1882 goto fail;
1883 }
Jaegeuk Kimd5f66992014-04-30 09:22:45 +09001884
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001885 *pagep = page;
1886
Gu Zhenge4795562013-09-27 18:08:30 +08001887 f2fs_lock_op(sbi);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001888
1889 /* check inline_data */
1890 ipage = get_node_page(sbi, inode->i_ino);
Chao Yucd34e292014-12-01 11:30:20 +08001891 if (IS_ERR(ipage)) {
1892 err = PTR_ERR(ipage);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001893 goto unlock_fail;
Chao Yucd34e292014-12-01 11:30:20 +08001894 }
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001895
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001896 set_new_dnode(&dn, inode, ipage, ipage, 0);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001897
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001898 if (f2fs_has_inline_data(inode)) {
1899 if (pos + len <= MAX_INLINE_DATA) {
1900 read_inline_data(page, ipage);
1901 set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
1902 sync_inode_page(&dn);
1903 goto put_next;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001904 }
Jaegeuk Kim5f727392014-11-25 10:59:45 -08001905 err = f2fs_convert_inline_page(&dn, page);
1906 if (err)
1907 goto put_fail;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001908 }
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001909 err = f2fs_reserve_block(&dn, index);
1910 if (err)
Jaegeuk Kim8cdcb712014-11-17 16:14:11 -08001911 goto put_fail;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001912put_next:
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001913 f2fs_put_dnode(&dn);
1914 f2fs_unlock_op(sbi);
1915
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001916 if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
1917 return 0;
1918
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001919 f2fs_wait_on_page_writeback(page, DATA);
1920
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001921 if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
1922 unsigned start = pos & (PAGE_CACHE_SIZE - 1);
1923 unsigned end = start + len;
1924
1925 /* Reading beyond i_size is simple: memset to zero */
1926 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001927 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001928 }
1929
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001930 if (dn.data_blkaddr == NEW_ADDR) {
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001931 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
1932 } else {
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001933 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001934 .sbi = sbi,
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001935 .type = DATA,
1936 .rw = READ_SYNC,
1937 .blk_addr = dn.data_blkaddr,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001938 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001939 .encrypted_page = NULL,
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001940 };
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001941 err = f2fs_submit_page_bio(&fio);
Jan Kara9234f312014-10-22 15:21:47 +02001942 if (err)
1943 goto fail;
Chao Yud54c7952014-03-29 15:30:40 +08001944
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001945 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001946 if (unlikely(!PageUptodate(page))) {
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001947 f2fs_put_page(page, 1);
Chao Yu3aab8f82014-07-02 13:25:04 +08001948 err = -EIO;
1949 goto fail;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001950 }
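		/* the page was truncated or reclaimed while unlocked; retry */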
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001951 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001952 f2fs_put_page(page, 1);
1953 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001954 }
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001955
1956 /* only decrypt regular-file pages; avoid symlink pages */
1957 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1958 err = f2fs_decrypt_one(inode, page);
1959 if (err) {
1960 f2fs_put_page(page, 1);
1961 goto fail;
1962 }
1963 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001964 }
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001965out:
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001966 SetPageUptodate(page);
1967 clear_cold_data(page);
1968 return 0;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001969
Jaegeuk Kim8cdcb712014-11-17 16:14:11 -08001970put_fail:
1971 f2fs_put_dnode(&dn);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001972unlock_fail:
1973 f2fs_unlock_op(sbi);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001974 f2fs_put_page(page, 1);
Chao Yu3aab8f82014-07-02 13:25:04 +08001975fail:
1976 f2fs_write_failed(mapping, pos + len);
1977 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001978}
1979
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001980static int f2fs_write_end(struct file *file,
1981 struct address_space *mapping,
1982 loff_t pos, unsigned len, unsigned copied,
1983 struct page *page, void *fsdata)
1984{
1985 struct inode *inode = page->mapping->host;
1986
Chao Yudfb2bf32014-05-06 16:47:23 +08001987 trace_f2fs_write_end(inode, pos, len, copied);
1988
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001989 set_page_dirty(page);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001990
1991 if (pos + copied > i_size_read(inode)) {
1992 i_size_write(inode, pos + copied);
1993 mark_inode_dirty(inode);
1994 update_inode_page(inode);
1995 }
1996
Chao Yu75c3c8b2013-11-16 14:15:59 +08001997 f2fs_put_page(page, 1);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001998 return copied;
1999}
2000
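/*
 * Misaligned direct writes are rejected here; f2fs_direct_IO() then
 * returns 0 so the caller falls back to buffered IO.
 */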
Omar Sandoval6f673762015-03-16 04:33:52 -07002001static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
2002 loff_t offset)
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09002003{
2004 unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09002005
Omar Sandoval6f673762015-03-16 04:33:52 -07002006 if (iov_iter_rw(iter) == READ)
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09002007 return 0;
2008
2009 if (offset & blocksize_mask)
2010 return -EINVAL;
2011
Al Viro5b46f252014-03-16 18:07:34 -04002012 if (iov_iter_alignment(iter) & blocksize_mask)
2013 return -EINVAL;
2014
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09002015 return 0;
2016}
2017
Omar Sandoval22c61862015-03-16 04:33:53 -07002018static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
2019 loff_t offset)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002020{
2021 struct file *file = iocb->ki_filp;
Chao Yu3aab8f82014-07-02 13:25:04 +08002022 struct address_space *mapping = file->f_mapping;
2023 struct inode *inode = mapping->host;
2024 size_t count = iov_iter_count(iter);
2025 int err;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09002026
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002027 /* inline data cannot stay inline across direct IO; convert it first */
2028 if (f2fs_has_inline_data(inode)) {
2029 err = f2fs_convert_inline_inode(inode);
2030 if (err)
2031 return err;
2032 }
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002033
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07002034 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
2035 return 0;
2036
Omar Sandoval6f673762015-03-16 04:33:52 -07002037 if (check_direct_IO(inode, iter, offset))
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09002038 return 0;
2039
Omar Sandoval6f673762015-03-16 04:33:52 -07002040 trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
Chao Yu70407fa2014-07-31 21:11:22 +08002041
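	/* preallocate blocks up front so blockdev_direct_IO() sees them mapped */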
Omar Sandoval6f673762015-03-16 04:33:52 -07002042 if (iov_iter_rw(iter) == WRITE)
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08002043 __allocate_data_blocks(inode, offset, count);
2044
Omar Sandoval17f8c842015-03-16 04:33:50 -07002045 err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
Omar Sandoval6f673762015-03-16 04:33:52 -07002046 if (err < 0 && iov_iter_rw(iter) == WRITE)
Chao Yu3aab8f82014-07-02 13:25:04 +08002047 f2fs_write_failed(mapping, offset + count);
Chao Yu70407fa2014-07-31 21:11:22 +08002048
Omar Sandoval6f673762015-03-16 04:33:52 -07002049 trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
Chao Yu70407fa2014-07-31 21:11:22 +08002050
Chao Yu3aab8f82014-07-02 13:25:04 +08002051 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002052}
2053
Chao Yu487261f2015-02-05 17:44:29 +08002054void f2fs_invalidate_page(struct page *page, unsigned int offset,
2055 unsigned int length)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002056{
2057 struct inode *inode = page->mapping->host;
Chao Yu487261f2015-02-05 17:44:29 +08002058 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07002059
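	/*
	 * A partially invalidated data page stays dirty-accounted and keeps
	 * its private flag, so there is nothing to do for it here.
	 */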
Chao Yu487261f2015-02-05 17:44:29 +08002060 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
2061 (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07002062 return;
2063
Chao Yu487261f2015-02-05 17:44:29 +08002064 if (PageDirty(page)) {
2065 if (inode->i_ino == F2FS_META_INO(sbi))
2066 dec_page_count(sbi, F2FS_DIRTY_META);
2067 else if (inode->i_ino == F2FS_NODE_INO(sbi))
2068 dec_page_count(sbi, F2FS_DIRTY_NODES);
2069 else
2070 inode_dec_dirty_pages(inode);
2071 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002072 ClearPagePrivate(page);
2073}
2074
Chao Yu487261f2015-02-05 17:44:29 +08002075int f2fs_release_page(struct page *page, gfp_t wait)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002076{
Jaegeuk Kimf68daee2015-01-30 11:39:08 -08002077 /* If this is dirty page, keep PagePrivate */
2078 if (PageDirty(page))
2079 return 0;
2080
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002081 ClearPagePrivate(page);
Jaegeuk Kimc3850aa2013-03-14 09:24:32 +09002082 return 1;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002083}
2084
2085static int f2fs_set_data_page_dirty(struct page *page)
2086{
2087 struct address_space *mapping = page->mapping;
2088 struct inode *inode = mapping->host;
2089
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09002090 trace_f2fs_set_page_dirty(page, DATA);
2091
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002092 SetPageUptodate(page);
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07002093
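	/* pages of an atomic-write file are staged in memory until commit */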
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002094 if (f2fs_is_atomic_file(inode)) {
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07002095 register_inmem_page(inode, page);
2096 return 1;
2097 }
2098
Jaegeuk Kima18ff062014-01-21 13:32:12 +09002099 mark_inode_dirty(inode);
2100
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002101 if (!PageDirty(page)) {
2102 __set_page_dirty_nobuffers(page);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07002103 update_dirty_page(inode, page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002104 return 1;
2105 }
2106 return 0;
2107}
2108
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09002109static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
2110{
Chao Yu454ae7e2014-04-22 13:34:01 +08002111 struct inode *inode = mapping->host;
2112
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002113 /* generic_block_bmap() needs real block addresses; convert inline data first */
2114 if (f2fs_has_inline_data(inode)) {
2115 int err = f2fs_convert_inline_inode(inode);
2116 if (err)
2117 return err;
2118 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09002119 return generic_block_bmap(mapping, block, get_data_block);
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09002120}
2121
Chao Yu429511c2015-02-05 17:54:31 +08002122void init_extent_cache_info(struct f2fs_sb_info *sbi)
2123{
2124 INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
2125 init_rwsem(&sbi->extent_tree_lock);
2126 INIT_LIST_HEAD(&sbi->extent_list);
2127 spin_lock_init(&sbi->extent_lock);
2128 sbi->total_ext_tree = 0;
2129 atomic_set(&sbi->total_ext_node, 0);
2130}
2131
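/* slab caches for extent tree and extent node objects, set up at module init */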
2132int __init create_extent_cache(void)
2133{
2134 extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
2135 sizeof(struct extent_tree));
2136 if (!extent_tree_slab)
2137 return -ENOMEM;
2138 extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
2139 sizeof(struct extent_node));
2140 if (!extent_node_slab) {
2141 kmem_cache_destroy(extent_tree_slab);
2142 return -ENOMEM;
2143 }
2144 return 0;
2145}
2146
2147void destroy_extent_cache(void)
2148{
2149 kmem_cache_destroy(extent_node_slab);
2150 kmem_cache_destroy(extent_tree_slab);
2151}
2152
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002153const struct address_space_operations f2fs_dblock_aops = {
2154 .readpage = f2fs_read_data_page,
2155 .readpages = f2fs_read_data_pages,
2156 .writepage = f2fs_write_data_page,
2157 .writepages = f2fs_write_data_pages,
2158 .write_begin = f2fs_write_begin,
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09002159 .write_end = f2fs_write_end,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002160 .set_page_dirty = f2fs_set_data_page_dirty,
Chao Yu487261f2015-02-05 17:44:29 +08002161 .invalidatepage = f2fs_invalidate_page,
2162 .releasepage = f2fs_release_page,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002163 .direct_IO = f2fs_direct_IO,
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09002164 .bmap = f2fs_bmap,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002165};