/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

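/*
 * Completion callback for read bios. Encrypted bios are handed to fscrypt
 * for decryption; otherwise each page is marked uptodate (or failed) and
 * unlocked.
 */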
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

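/*
 * Completion callback for write bios. An I/O error marks the mapping and
 * stops checkpointing; each page then has its writeback ended, and the
 * checkpoint waiter is woken once the last write bio completes.
 */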
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

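/*
 * Check whether the pending merged bio contains a page matching @inode,
 * @page or @ino. If all three are unset, any pending bio matches.
 */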
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		if (test_opt(sbi, NOBARRIER))
			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
								REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is unlocked by the end_io handler on completion.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFAULT;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
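
/*
 * Example: reading one data block into a locked page, in the style of
 * get_read_data_page() below. This helper is hypothetical and only
 * illustrates how a caller fills struct f2fs_io_info; @blkaddr is
 * assumed to be a valid, already looked-up block address.
 */
static int __maybe_unused example_read_one_block(struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC,
		.old_blkaddr = blkaddr,
		.new_blkaddr = blkaddr,
		.page = page,
		.encrypted_page = NULL,
	};

	/* On success, the page is unlocked by f2fs_read_end_io(). */
	return f2fs_submit_page_bio(&fio);
}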

void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_block_addr(fio, fio->old_blkaddr);
	verify_block_addr(fio, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
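
/*
 * Example: updating a block address under the lock ordering documented
 * above. This helper is hypothetical; it assumes the caller already holds
 * the locked data page and has allocated @blkaddr.
 */
static void __maybe_unused example_update_blkaddr(struct inode *inode,
					pgoff_t index, block_t blkaddr)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, index, LOOKUP_NODE))
		return;
	/* updates the node page and the extent cache */
	f2fs_update_data_blkaddr(&dn, blkaddr);
	f2fs_put_dnode(&dn);
}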

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since
	 * its new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers (functions in dir.c and GC) need to be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
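
/*
 * Example: a dir.c/GC-style caller that must distinguish a hole from
 * valid data. This helper is hypothetical; a hole is reported as an
 * error (e.g. -ENOENT), never as a zero-filled page.
 */
static int __maybe_unused example_touch_data_page(struct inode *inode,
							pgoff_t index)
{
	struct page *page;

	page = get_lock_data_page(inode, index, false);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* hole or I/O error */

	/* ... use the locked, uptodate page here ... */
	f2fs_put_page(page, 1);
	return 0;
}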

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
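
/*
 * Example: allocating a fresh data page under f2fs_lock_op(), as the
 * comment above requires. This helper is hypothetical; with a NULL
 * ipage, no inode page needs to be released on failure.
 */
static int __maybe_unused example_new_data_page(struct inode *inode,
							pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, true);
	f2fs_unlock_op(sbi);
	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_put_page(page, 1);
	return 0;
}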

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() supports readahead/bmap/rw direct_IO via the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
		err = -EFAULT;
		goto sync_out;
	}

	if (!is_valid_data_blkaddr(sbi, blkaddr)) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(inode, FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;
		allocated = dn.node_changed;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
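
/*
 * Example: mapping a logical range without allocation, in the style of
 * __get_data_block() below. This helper is hypothetical and only shows
 * how the result is returned through struct f2fs_map_blocks.
 */
static int __maybe_unused example_map_range(struct inode *inode,
					pgoff_t lblk, unsigned int len)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_pblk = 0;
	map.m_lblk = lblk;
	map.m_len = len;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
	if (err)
		return err;

	if (map.m_flags & F2FS_MAP_MAPPED) {
		/* blocks [lblk, lblk + map.m_len) start at map.m_pblk */
	}
	return 0;
}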

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until we pass the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* A hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
		return ERR_PTR(-EFAULT);

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for
 * f2fs. The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}

			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
								DATA_GENERIC))
				goto set_error_page;
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
1186
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001187int do_write_data_page(struct f2fs_io_info *fio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001188{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001189 struct page *page = fio->page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001190 struct inode *inode = page->mapping->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001191 struct dnode_of_data dn;
1192 int err = 0;
1193
1194 set_new_dnode(&dn, inode, NULL, NULL, 0);
Jaegeuk Kim266e97a2013-02-26 13:10:46 +09001195 err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001196 if (err)
1197 return err;
1198
Chao Yu28bc1062016-02-06 14:40:34 +08001199 fio->old_blkaddr = dn.data_blkaddr;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001200
1201 /* This page is already truncated */
Chao Yu7a9d7542016-02-22 18:36:38 +08001202 if (fio->old_blkaddr == NULL_ADDR) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001203 ClearPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001204 goto out_writepage;
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001205 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001206
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001207 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
Jaegeuk Kimb32e4482016-04-11 15:51:57 -07001208 gfp_t gfp_flags = GFP_NOFS;
Chao Yu08b39fb2015-10-08 13:27:34 +08001209
1210 /* wait for GCed encrypted page writeback */
1211 f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
Chao Yu7a9d7542016-02-22 18:36:38 +08001212 fio->old_blkaddr);
Jaegeuk Kimb32e4482016-04-11 15:51:57 -07001213retry_encrypt:
1214 fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
1215 gfp_flags);
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001216 if (IS_ERR(fio->encrypted_page)) {
1217 err = PTR_ERR(fio->encrypted_page);
Jaegeuk Kimb32e4482016-04-11 15:51:57 -07001218 if (err == -ENOMEM) {
1219 /* flush pending ios and wait for a while */
1220 f2fs_flush_merged_bios(F2FS_I_SB(inode));
1221 congestion_wait(BLK_RW_ASYNC, HZ/50);
1222 gfp_flags |= __GFP_NOFAIL;
1223 err = 0;
1224 goto retry_encrypt;
1225 }
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001226 goto out_writepage;
1227 }
1228 }
1229
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001230 set_page_writeback(page);
1231
Chao Yuaafb3712018-08-01 19:13:44 +08001232 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
1233 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1234 DATA_GENERIC)) {
1235 err = -EFAULT;
1236 goto out_writepage;
1237 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001238	/*
 1239	 * If the current allocation needs SSR,
 1240	 * it is better to do in-place writes for the updated data.
 1241	 */
Chao Yud4511882018-06-05 17:44:11 +08001242 if (unlikely(is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
Haicheng Lib25958b2013-06-13 16:59:29 +08001243 !is_cold_data(page) &&
Chao Yu2da3e0272015-10-28 17:56:14 +08001244 !IS_ATOMIC_WRITTEN_PAGE(page) &&
Haicheng Lib25958b2013-06-13 16:59:29 +08001245 need_inplace_update(inode))) {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001246 rewrite_data_page(fio);
Jaegeuk Kim91942322016-05-20 10:13:22 -07001247 set_inode_flag(inode, FI_UPDATE_WRITE);
Jaegeuk Kim8ce67cb2015-03-17 17:58:08 -07001248 trace_f2fs_do_write_data_page(page, IPU);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001249 } else {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001250 write_data_page(&dn, fio);
Jaegeuk Kim8ce67cb2015-03-17 17:58:08 -07001251 trace_f2fs_do_write_data_page(page, OPU);
Jaegeuk Kim91942322016-05-20 10:13:22 -07001252 set_inode_flag(inode, FI_APPEND_WRITE);
Jaegeuk Kim3c6c2be2015-03-17 17:16:35 -07001253 if (page->index == 0)
Jaegeuk Kim91942322016-05-20 10:13:22 -07001254 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001255 }
1256out_writepage:
1257 f2fs_put_dnode(&dn);
1258 return err;
1259}
1260
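/*
 * ->writepage entry point: pages entirely beyond i_size are skipped, the
 * last partial page is zeroed past EOF, dentry blocks are written directly
 * under checkpoint control, and regular data may first try an inline write
 * before falling back to do_write_data_page() under f2fs_lock_op().
 */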
1261static int f2fs_write_data_page(struct page *page,
1262 struct writeback_control *wbc)
1263{
1264 struct inode *inode = page->mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001265 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001266 loff_t i_size = i_size_read(inode);
1267 const pgoff_t end_index = ((unsigned long long) i_size)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001268 >> PAGE_SHIFT;
Jaegeuk Kim26de9b12016-05-20 20:42:37 -07001269 loff_t psize = (page->index + 1) << PAGE_SHIFT;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001270 unsigned offset = 0;
Jaegeuk Kim39936832012-11-22 16:21:29 +09001271 bool need_balance_fs = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001272 int err = 0;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001273 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001274 .sbi = sbi,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001275 .type = DATA,
Mike Christie04d328d2016-06-05 14:31:55 -05001276 .op = REQ_OP_WRITE,
1277 .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001278 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001279 .encrypted_page = NULL,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001280 };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001281
Chao Yuecda0de2014-05-06 16:48:26 +08001282 trace_f2fs_writepage(page, DATA);
1283
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001284 if (page->index < end_index)
Jaegeuk Kim39936832012-11-22 16:21:29 +09001285 goto write;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001286
 1287	 /*
 1288	 * If the offset is beyond the end of the file,
 1289	 * this page does not have to be written to disk.
 1290	 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001291 offset = i_size & (PAGE_SIZE - 1);
Jaegeuk Kim76f60262014-04-15 16:04:15 +09001292 if ((page->index >= end_index + 1) || !offset)
Jaegeuk Kim39936832012-11-22 16:21:29 +09001293 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001294
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001295 zero_user_segment(page, offset, PAGE_SIZE);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001296write:
Chao Yucaf00472015-01-28 17:48:42 +08001297 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001298 goto redirty_out;
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001299 if (f2fs_is_drop_cache(inode))
1300 goto out;
Jaegeuk Kime6e5f562016-04-14 16:48:52 -07001301	/* we should not write the 0'th page, which holds the journal header */
1302 if (f2fs_is_volatile_file(inode) && (!page->index ||
1303 (!wbc->for_reclaim &&
1304 available_free_memory(sbi, BASE_CHECK))))
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001305 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001306
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001307	/* we should bypass data pages to let the kworker jobs proceed */
1308 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kim7f319972016-06-03 12:28:26 -07001309 mapping_set_error(page->mapping, -EIO);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001310 goto out;
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001311 }
1312
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07001313 /* Dentry blocks are controlled by checkpoint */
1314 if (S_ISDIR(inode->i_mode)) {
1315 err = do_write_data_page(&fio);
1316 goto done;
1317 }
1318
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001319 if (!wbc->for_reclaim)
1320 need_balance_fs = true;
Jaegeuk Kim7f3037a2016-09-01 12:02:51 -07001321 else if (has_not_enough_free_secs(sbi, 0, 0))
Jaegeuk Kim39936832012-11-22 16:21:29 +09001322 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001323
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001324 err = -EAGAIN;
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001325 f2fs_lock_op(sbi);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001326 if (f2fs_has_inline_data(inode))
1327 err = f2fs_write_inline_data(inode, page);
1328 if (err == -EAGAIN)
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001329 err = do_write_data_page(&fio);
Jaegeuk Kim26de9b12016-05-20 20:42:37 -07001330 if (F2FS_I(inode)->last_disk_size < psize)
1331 F2FS_I(inode)->last_disk_size = psize;
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001332 f2fs_unlock_op(sbi);
1333done:
1334 if (err && err != -ENOENT)
1335 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001336
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001337 clear_cold_data(page);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001338out:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001339 inode_dec_dirty_pages(inode);
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001340 if (err)
1341 ClearPageUptodate(page);
Chao Yu0c3a5792016-01-18 18:28:11 +08001342
1343 if (wbc->for_reclaim) {
1344 f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
Chao Yuc227f912015-12-16 13:09:20 +08001345 remove_dirty_inode(inode);
Chao Yueb7e8132015-11-10 18:45:07 +08001346 }
Chao Yu0c3a5792016-01-18 18:28:11 +08001347
1348 unlock_page(page);
1349 f2fs_balance_fs(sbi, need_balance_fs);
1350
1351 if (unlikely(f2fs_cp_error(sbi)))
1352 f2fs_submit_merged_bio(sbi, DATA, WRITE);
1353
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001354 return 0;
1355
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001356redirty_out:
Jaegeuk Kim76f60262014-04-15 16:04:15 +09001357 redirty_page_for_writepage(wbc, page);
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07001358 unlock_page(page);
1359 return err;
Namjae Jeonfa9150a2013-01-15 16:45:24 +09001360}
1361
Chao Yu8f46dca2015-07-14 18:56:10 +08001362/*
 1363 * This function was copied from write_cache_pages in mm/page-writeback.c.
 1364 * The major change is that the write step for cold data pages is handled
 1365 * separately from warm/hot data pages.
 1366 */
1367static int f2fs_write_cache_pages(struct address_space *mapping,
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07001368 struct writeback_control *wbc)
Chao Yu8f46dca2015-07-14 18:56:10 +08001369{
1370 int ret = 0;
1371 int done = 0;
1372 struct pagevec pvec;
1373 int nr_pages;
1374 pgoff_t uninitialized_var(writeback_index);
1375 pgoff_t index;
1376 pgoff_t end; /* Inclusive */
1377 pgoff_t done_index;
1378 int cycled;
1379 int range_whole = 0;
1380 int tag;
Chao Yu6ca56ca2016-09-29 18:50:11 +08001381 int nwritten = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08001382
1383 pagevec_init(&pvec, 0);
Jaegeuk Kim46ae9572016-05-25 20:57:16 -07001384
Chao Yu8f46dca2015-07-14 18:56:10 +08001385 if (wbc->range_cyclic) {
1386 writeback_index = mapping->writeback_index; /* prev offset */
1387 index = writeback_index;
1388 if (index == 0)
1389 cycled = 1;
1390 else
1391 cycled = 0;
1392 end = -1;
1393 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001394 index = wbc->range_start >> PAGE_SHIFT;
1395 end = wbc->range_end >> PAGE_SHIFT;
Chao Yu8f46dca2015-07-14 18:56:10 +08001396 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1397 range_whole = 1;
1398 cycled = 1; /* ignore range_cyclic tests */
1399 }
1400 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1401 tag = PAGECACHE_TAG_TOWRITE;
1402 else
1403 tag = PAGECACHE_TAG_DIRTY;
1404retry:
1405 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1406 tag_pages_for_writeback(mapping, index, end);
1407 done_index = index;
1408 while (!done && (index <= end)) {
1409 int i;
1410
1411 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1412 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
1413 if (nr_pages == 0)
1414 break;
1415
1416 for (i = 0; i < nr_pages; i++) {
1417 struct page *page = pvec.pages[i];
1418
1419 if (page->index > end) {
1420 done = 1;
1421 break;
1422 }
1423
1424 done_index = page->index;
1425
1426 lock_page(page);
1427
1428 if (unlikely(page->mapping != mapping)) {
1429continue_unlock:
1430 unlock_page(page);
1431 continue;
1432 }
1433
1434 if (!PageDirty(page)) {
1435 /* someone wrote it for us */
1436 goto continue_unlock;
1437 }
1438
Chao Yu8f46dca2015-07-14 18:56:10 +08001439 if (PageWriteback(page)) {
1440 if (wbc->sync_mode != WB_SYNC_NONE)
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08001441 f2fs_wait_on_page_writeback(page,
1442 DATA, true);
Chao Yu8f46dca2015-07-14 18:56:10 +08001443 else
1444 goto continue_unlock;
1445 }
1446
1447 BUG_ON(PageWriteback(page));
1448 if (!clear_page_dirty_for_io(page))
1449 goto continue_unlock;
1450
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07001451 ret = mapping->a_ops->writepage(page, wbc);
Chao Yu8f46dca2015-07-14 18:56:10 +08001452 if (unlikely(ret)) {
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07001453 done_index = page->index + 1;
1454 done = 1;
1455 break;
Chao Yu6ca56ca2016-09-29 18:50:11 +08001456 } else {
1457 nwritten++;
Chao Yu8f46dca2015-07-14 18:56:10 +08001458 }
1459
1460 if (--wbc->nr_to_write <= 0 &&
1461 wbc->sync_mode == WB_SYNC_NONE) {
1462 done = 1;
1463 break;
1464 }
1465 }
1466 pagevec_release(&pvec);
1467 cond_resched();
1468 }
1469
Chao Yu8f46dca2015-07-14 18:56:10 +08001470 if (!cycled && !done) {
1471 cycled = 1;
1472 index = 0;
1473 end = writeback_index - 1;
1474 goto retry;
1475 }
1476 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1477 mapping->writeback_index = done_index;
1478
Chao Yu6ca56ca2016-09-29 18:50:11 +08001479 if (nwritten)
1480 f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
1481 NULL, 0, DATA, WRITE);
1482
Chao Yu8f46dca2015-07-14 18:56:10 +08001483 return ret;
1484}
1485
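/*
 * ->writepages entry point: writeback is skipped for special files without
 * ->writepage, for inodes with no dirty pages under WB_SYNC_NONE, for
 * lightly dirtied directories, during defragmentation (FI_DO_DEFRAG), and
 * during power-on recovery (SBI_POR_DOING).
 */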
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001486static int f2fs_write_data_pages(struct address_space *mapping,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001487 struct writeback_control *wbc)
1488{
1489 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001490 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07001491 struct blk_plug plug;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001492 int ret;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001493
P J Pcfb185a2013-04-03 11:38:00 +09001494	/* deal with chardevs and other special files */
1495 if (!mapping->a_ops->writepage)
1496 return 0;
1497
Chao Yu6a290542015-07-17 18:02:39 +08001498 /* skip writing if there is no dirty page in this inode */
1499 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
1500 return 0;
1501
Jaegeuk Kima1257022015-10-08 10:40:07 -07001502 if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
1503 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
1504 available_free_memory(sbi, DIRTY_DENTS))
1505 goto skip_write;
1506
Chao Yud323d002015-10-27 09:53:45 +08001507 /* skip writing during file defragment */
Jaegeuk Kim91942322016-05-20 10:13:22 -07001508 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
Chao Yud323d002015-10-27 09:53:45 +08001509 goto skip_write;
1510
Jaegeuk Kimd5669f72015-02-27 13:37:39 -08001511 /* during POR, we don't need to trigger writepage at all. */
1512 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1513 goto skip_write;
1514
Yunlei Hed31c7c32016-02-04 16:14:00 +08001515 trace_f2fs_writepages(mapping->host, wbc, DATA);
1516
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07001517 blk_start_plug(&plug);
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07001518 ret = f2fs_write_cache_pages(mapping, wbc);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07001519 blk_finish_plug(&plug);
Jaegeuk Kim28ea6162016-05-25 17:17:56 -07001520	/*
 1521	 * if some pages were truncated, we cannot guarantee that mapping->host
 1522	 * can be used to detect pending bios.
 1523	 */
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001524
Chao Yuc227f912015-12-16 13:09:20 +08001525 remove_dirty_inode(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001526 return ret;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001527
1528skip_write:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001529 wbc->pages_skipped += get_dirty_pages(inode);
Yunlei Hed31c7c32016-02-04 16:14:00 +08001530 trace_f2fs_writepages(mapping->host, wbc, DATA);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001531 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001532}
1533
Chao Yu3aab8f82014-07-02 13:25:04 +08001534static void f2fs_write_failed(struct address_space *mapping, loff_t to)
1535{
1536 struct inode *inode = mapping->host;
Jaegeuk Kim819d9152015-12-28 13:48:11 -08001537 loff_t i_size = i_size_read(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08001538
Jaegeuk Kim819d9152015-12-28 13:48:11 -08001539 if (to > i_size) {
1540 truncate_pagecache(inode, i_size);
1541 truncate_blocks(inode, i_size, true);
Chao Yu3aab8f82014-07-02 13:25:04 +08001542 }
1543}
1544
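/*
 * Resolve the block address backing a page for write_begin: inline data is
 * served from the inode page or converted out, an extent cache hit avoids
 * taking f2fs_lock_op(), and a hole restarts the lookup with the lock held
 * so a new block can be allocated.
 */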
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001545static int prepare_write_begin(struct f2fs_sb_info *sbi,
1546 struct page *page, loff_t pos, unsigned len,
1547 block_t *blk_addr, bool *node_changed)
1548{
1549 struct inode *inode = page->mapping->host;
1550 pgoff_t index = page->index;
1551 struct dnode_of_data dn;
1552 struct page *ipage;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001553 bool locked = false;
1554 struct extent_info ei;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001555 int err = 0;
1556
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001557 /*
1558 * we already allocated all the blocks, so we don't need to get
1559 * the block addresses when there is no need to fill the page.
1560 */
Yunlei He5d4c0af2016-09-18 08:16:56 +08001561 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001562 return 0;
1563
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001564 if (f2fs_has_inline_data(inode) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001565 (pos & PAGE_MASK) >= i_size_read(inode)) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001566 f2fs_lock_op(sbi);
1567 locked = true;
1568 }
1569restart:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001570 /* check inline_data */
1571 ipage = get_node_page(sbi, inode->i_ino);
1572 if (IS_ERR(ipage)) {
1573 err = PTR_ERR(ipage);
1574 goto unlock_out;
1575 }
1576
1577 set_new_dnode(&dn, inode, ipage, ipage, 0);
1578
1579 if (f2fs_has_inline_data(inode)) {
1580 if (pos + len <= MAX_INLINE_DATA) {
1581 read_inline_data(page, ipage);
Jaegeuk Kim91942322016-05-20 10:13:22 -07001582 set_inode_flag(inode, FI_DATA_EXIST);
Chao Yuab470362016-05-11 19:48:44 +08001583 if (inode->i_nlink)
1584 set_inline_node(ipage);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001585 } else {
1586 err = f2fs_convert_inline_page(&dn, page);
1587 if (err)
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001588 goto out;
1589 if (dn.data_blkaddr == NULL_ADDR)
1590 err = f2fs_get_block(&dn, index);
1591 }
1592 } else if (locked) {
1593 err = f2fs_get_block(&dn, index);
1594 } else {
1595 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1596 dn.data_blkaddr = ei.blk + index - ei.fofs;
1597 } else {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001598 /* hole case */
1599 err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim4da7bf52016-04-06 11:27:03 -07001600 if (err || dn.data_blkaddr == NULL_ADDR) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001601 f2fs_put_dnode(&dn);
1602 f2fs_lock_op(sbi);
1603 locked = true;
1604 goto restart;
1605 }
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001606 }
1607 }
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001608
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001609 /* convert_inline_page can make node_changed */
1610 *blk_addr = dn.data_blkaddr;
1611 *node_changed = dn.node_changed;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001612out:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001613 f2fs_put_dnode(&dn);
1614unlock_out:
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001615 if (locked)
1616 f2fs_unlock_op(sbi);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001617 return err;
1618}
1619
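/*
 * ->write_begin: convert inline data first for any index other than 0 to
 * honor the lock_page(page #0) -> lock_page(inode_page) ordering, grab the
 * page cache page, resolve its block address, and read or zero the page
 * when the write does not cover the whole PAGE_SIZE.
 */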
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001620static int f2fs_write_begin(struct file *file, struct address_space *mapping,
1621 loff_t pos, unsigned len, unsigned flags,
1622 struct page **pagep, void **fsdata)
1623{
1624 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001625 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001626 struct page *page = NULL;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001627 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001628 bool need_balance = false;
1629 block_t blkaddr = NULL_ADDR;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001630 int err = 0;
1631
Chao Yu62aed042014-05-06 16:46:04 +08001632 trace_f2fs_write_begin(inode, pos, len, flags);
1633
Jaegeuk Kim5f727392014-11-25 10:59:45 -08001634 /*
1635 * We should check this at this moment to avoid deadlock on inode page
1636 * and #0 page. The locking rule for inline_data conversion should be:
1637 * lock_page(page #0) -> lock_page(inode_page)
1638 */
1639 if (index != 0) {
1640 err = f2fs_convert_inline_inode(inode);
1641 if (err)
1642 goto fail;
1643 }
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001644repeat:
Jaegeuk Kime9afe7c2017-02-17 09:55:55 -08001645	/*
 1646	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
 1647	 * wait_for_stable_page. We will wait on it below under our IO control.
 1648	 */
1649 page = pagecache_get_page(mapping, index,
1650 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
Chao Yu3aab8f82014-07-02 13:25:04 +08001651 if (!page) {
1652 err = -ENOMEM;
1653 goto fail;
1654 }
Jaegeuk Kimd5f66992014-04-30 09:22:45 +09001655
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001656 *pagep = page;
1657
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001658 err = prepare_write_begin(sbi, page, pos, len,
1659 &blkaddr, &need_balance);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001660 if (err)
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001661 goto fail;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001662
Jaegeuk Kim7f3037a2016-09-01 12:02:51 -07001663 if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001664 unlock_page(page);
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08001665 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001666 lock_page(page);
1667 if (page->mapping != mapping) {
1668 /* The page got truncated from under us */
1669 f2fs_put_page(page, 1);
1670 goto repeat;
1671 }
1672 }
1673
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08001674 f2fs_wait_on_page_writeback(page, DATA, false);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001675
Chao Yu08b39fb2015-10-08 13:27:34 +08001676 /* wait for GCed encrypted page writeback */
1677 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001678 f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
Chao Yu08b39fb2015-10-08 13:27:34 +08001679
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07001680 if (len == PAGE_SIZE || PageUptodate(page))
1681 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001682
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001683 if (blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001684 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07001685 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001686 } else {
Chao Yu78682f72016-07-03 22:05:11 +08001687 struct bio *bio;
Chao Yud54c7952014-03-29 15:30:40 +08001688
Chao Yu78682f72016-07-03 22:05:11 +08001689 bio = f2fs_grab_bio(inode, blkaddr, 1);
1690 if (IS_ERR(bio)) {
1691 err = PTR_ERR(bio);
Chao Yu3aab8f82014-07-02 13:25:04 +08001692 goto fail;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001693 }
Linus Torvalds4fc29c12016-07-27 10:36:31 -07001694 bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
Chao Yu78682f72016-07-03 22:05:11 +08001695 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1696 bio_put(bio);
1697 err = -EFAULT;
1698 goto fail;
1699 }
1700
Linus Torvalds4fc29c12016-07-27 10:36:31 -07001701 __submit_bio(sbi, bio, DATA);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001702
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001703 lock_page(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001704 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001705 f2fs_put_page(page, 1);
1706 goto repeat;
1707 }
Chao Yu1563ac72016-07-03 22:05:12 +08001708 if (unlikely(!PageUptodate(page))) {
1709 err = -EIO;
1710 goto fail;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001711 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001712 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001713 return 0;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001714
Chao Yu3aab8f82014-07-02 13:25:04 +08001715fail:
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001716 f2fs_put_page(page, 1);
Chao Yu3aab8f82014-07-02 13:25:04 +08001717 f2fs_write_failed(mapping, pos + len);
1718 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001719}
1720
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001721static int f2fs_write_end(struct file *file,
1722 struct address_space *mapping,
1723 loff_t pos, unsigned len, unsigned copied,
1724 struct page *page, void *fsdata)
1725{
1726 struct inode *inode = page->mapping->host;
1727
Chao Yudfb2bf32014-05-06 16:47:23 +08001728 trace_f2fs_write_end(inode, pos, len, copied);
1729
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07001730	/*
 1731	 * This should come from len == PAGE_SIZE, so we expect copied to be
 1732	 * PAGE_SIZE. Otherwise, we treat it as zero copied and let
 1733	 * generic_perform_write() try to copy the data again with copied=0.
 1734	 */
1735 if (!PageUptodate(page)) {
1736 if (unlikely(copied != PAGE_SIZE))
1737 copied = 0;
1738 else
1739 SetPageUptodate(page);
1740 }
1741 if (!copied)
1742 goto unlock_out;
1743
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001744 set_page_dirty(page);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07001745 clear_cold_data(page);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001746
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07001747 if (pos + copied > i_size_read(inode))
1748 f2fs_i_size_write(inode, pos + copied);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07001749unlock_out:
Chao Yu3024c9a2016-08-06 21:09:41 +08001750 f2fs_put_page(page, 1);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08001751 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001752 return copied;
1753}
1754
Omar Sandoval6f673762015-03-16 04:33:52 -07001755static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
1756 loff_t offset)
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001757{
1758 unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001759
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001760 if (offset & blocksize_mask)
1761 return -EINVAL;
1762
Al Viro5b46f252014-03-16 18:07:34 -04001763 if (iov_iter_alignment(iter) & blocksize_mask)
1764 return -EINVAL;
1765
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001766 return 0;
1767}
1768
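/*
 * Direct I/O entry point: encrypted regular files and LFS-mode mounts
 * return 0 so the caller falls back to buffered I/O; the actual transfer
 * runs under the inode's dio_rwsem[rw] read lock.
 */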
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07001769static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001770{
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001771 struct address_space *mapping = iocb->ki_filp->f_mapping;
Chao Yu3aab8f82014-07-02 13:25:04 +08001772 struct inode *inode = mapping->host;
1773 size_t count = iov_iter_count(iter);
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07001774 loff_t offset = iocb->ki_pos;
Chao Yu82e0a5a2016-07-13 09:18:29 +08001775 int rw = iov_iter_rw(iter);
Chao Yu3aab8f82014-07-02 13:25:04 +08001776 int err;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001777
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001778 err = check_direct_IO(inode, iter, offset);
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08001779 if (err)
1780 return err;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001781
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001782 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
1783 return 0;
Jaegeuk Kim36abef42016-06-03 19:29:38 -07001784 if (test_opt(F2FS_I_SB(inode), LFS))
1785 return 0;
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001786
Jaegeuk Kim5302fb02016-07-22 15:25:47 -07001787 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
Chao Yu70407fa2014-07-31 21:11:22 +08001788
Chao Yu82e0a5a2016-07-13 09:18:29 +08001789 down_read(&F2FS_I(inode)->dio_rwsem[rw]);
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07001790 err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
Chao Yu82e0a5a2016-07-13 09:18:29 +08001791 up_read(&F2FS_I(inode)->dio_rwsem[rw]);
1792
1793 if (rw == WRITE) {
Jaegeuk Kim6bfc4912016-04-18 17:07:44 -04001794 if (err > 0)
Jaegeuk Kim91942322016-05-20 10:13:22 -07001795 set_inode_flag(inode, FI_UPDATE_WRITE);
Jaegeuk Kim6bfc4912016-04-18 17:07:44 -04001796 else if (err < 0)
1797 f2fs_write_failed(mapping, offset + count);
1798 }
Chao Yu70407fa2014-07-31 21:11:22 +08001799
Jaegeuk Kim5302fb02016-07-22 15:25:47 -07001800 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
Chao Yu70407fa2014-07-31 21:11:22 +08001801
Chao Yu3aab8f82014-07-02 13:25:04 +08001802 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001803}
1804
Chao Yu487261f2015-02-05 17:44:29 +08001805void f2fs_invalidate_page(struct page *page, unsigned int offset,
1806 unsigned int length)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001807{
1808 struct inode *inode = page->mapping->host;
Chao Yu487261f2015-02-05 17:44:29 +08001809 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001810
Chao Yu487261f2015-02-05 17:44:29 +08001811 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001812 (offset % PAGE_SIZE || length != PAGE_SIZE))
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001813 return;
1814
Chao Yu487261f2015-02-05 17:44:29 +08001815 if (PageDirty(page)) {
1816 if (inode->i_ino == F2FS_META_INO(sbi))
1817 dec_page_count(sbi, F2FS_DIRTY_META);
1818 else if (inode->i_ino == F2FS_NODE_INO(sbi))
1819 dec_page_count(sbi, F2FS_DIRTY_NODES);
1820 else
1821 inode_dec_dirty_pages(inode);
1822 }
Chao Yudecd36b2015-08-07 18:42:09 +08001823
 1824	/* This is an atomic written page, so keep it Private */
1825 if (IS_ATOMIC_WRITTEN_PAGE(page))
1826 return;
1827
Chao Yu23dc9742016-04-29 20:09:15 +08001828 set_page_private(page, 0);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001829 ClearPagePrivate(page);
1830}
1831
Chao Yu487261f2015-02-05 17:44:29 +08001832int f2fs_release_page(struct page *page, gfp_t wait)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001833{
Jaegeuk Kimf68daee2015-01-30 11:39:08 -08001834	/* If this is a dirty page, keep PagePrivate */
1835 if (PageDirty(page))
1836 return 0;
1837
Chao Yudecd36b2015-08-07 18:42:09 +08001838	/* This is an atomic written page, so keep it Private */
1839 if (IS_ATOMIC_WRITTEN_PAGE(page))
1840 return 0;
1841
Chao Yu23dc9742016-04-29 20:09:15 +08001842 set_page_private(page, 0);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001843 ClearPagePrivate(page);
Jaegeuk Kimc3850aa2013-03-14 09:24:32 +09001844 return 1;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001845}
1846
Jaegeuk Kimfe76b792016-06-30 18:40:10 -07001847/*
 1848 * This was copied from __set_page_dirty_buffers, which gives higher
 1849 * performance on very high speed storage (e.g., pmem).
 1850 */
1851void f2fs_set_page_dirty_nobuffers(struct page *page)
1852{
1853 struct address_space *mapping = page->mapping;
1854 unsigned long flags;
1855
1856 if (unlikely(!mapping))
1857 return;
1858
1859 spin_lock(&mapping->private_lock);
1860 lock_page_memcg(page);
1861 SetPageDirty(page);
1862 spin_unlock(&mapping->private_lock);
1863
1864 spin_lock_irqsave(&mapping->tree_lock, flags);
1865 WARN_ON_ONCE(!PageUptodate(page));
1866 account_page_dirtied(page, mapping);
1867 radix_tree_tag_set(&mapping->page_tree,
1868 page_index(page), PAGECACHE_TAG_DIRTY);
1869 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1870 unlock_page_memcg(page);
1871
1872 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1873 return;
1874}
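/*
 * Dirtying a data page: atomic-file pages are registered in the inmem list
 * via register_inmem_page() instead of being tagged dirty here; everything
 * else goes through f2fs_set_page_dirty_nobuffers().
 */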
1875
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001876static int f2fs_set_data_page_dirty(struct page *page)
1877{
1878 struct address_space *mapping = page->mapping;
1879 struct inode *inode = mapping->host;
1880
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09001881 trace_f2fs_set_page_dirty(page, DATA);
1882
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001883 if (!PageUptodate(page))
1884 SetPageUptodate(page);
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001885
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001886 if (f2fs_is_atomic_file(inode)) {
Chao Yudecd36b2015-08-07 18:42:09 +08001887 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
1888 register_inmem_page(inode, page);
1889 return 1;
1890 }
 1891		/*
 1892		 * This page has already been registered, so we just
 1893		 * return here.
 1894		 */
1895 return 0;
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001896 }
1897
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001898 if (!PageDirty(page)) {
Jaegeuk Kimfe76b792016-06-30 18:40:10 -07001899 f2fs_set_page_dirty_nobuffers(page);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001900 update_dirty_page(inode, page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001901 return 1;
1902 }
1903 return 0;
1904}
1905
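/*
 * bmap: inline-data inodes have no block mapping to report, and dirty
 * pages are flushed first so blocks are actually allocated before
 * generic_block_bmap() looks them up.
 */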
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09001906static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
1907{
Chao Yu454ae7e2014-04-22 13:34:01 +08001908 struct inode *inode = mapping->host;
1909
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07001910 if (f2fs_has_inline_data(inode))
1911 return 0;
1912
1913 /* make sure allocating whole blocks */
1914 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1915 filemap_write_and_wait(mapping);
1916
Chao Yue2b4e2b2015-08-19 19:11:19 +08001917 return generic_block_bmap(mapping, block, get_data_block_bmap);
Chao Yu429511c2015-02-05 17:54:31 +08001918}
1919
Weichao Guo5b7a4872016-09-20 05:03:27 +08001920#ifdef CONFIG_MIGRATION
1921#include <linux/migrate.h>
1922
1923int f2fs_migrate_page(struct address_space *mapping,
1924 struct page *newpage, struct page *page, enum migrate_mode mode)
1925{
1926 int rc, extra_count;
1927 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
1928 bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
1929
1930 BUG_ON(PageWriteback(page));
1931
 1932	/* migrating an atomic written page is safe with the inmem_lock held */
1933 if (atomic_written && !mutex_trylock(&fi->inmem_lock))
1934 return -EAGAIN;
1935
 1936	/*
 1937	 * A reference is expected if PagePrivate is set when moving a mapping;
 1938	 * however, F2FS breaks this rule to maintain dirty page counts when
 1939	 * truncating pages. So adjusting 'extra_count' here makes it work.
 1940	 */
1941 extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
1942 rc = migrate_page_move_mapping(mapping, newpage,
1943 page, NULL, mode, extra_count);
1944 if (rc != MIGRATEPAGE_SUCCESS) {
1945 if (atomic_written)
1946 mutex_unlock(&fi->inmem_lock);
1947 return rc;
1948 }
1949
1950 if (atomic_written) {
1951 struct inmem_pages *cur;
1952 list_for_each_entry(cur, &fi->inmem_pages, list)
1953 if (cur->page == page) {
1954 cur->page = newpage;
1955 break;
1956 }
1957 mutex_unlock(&fi->inmem_lock);
1958 put_page(page);
1959 get_page(newpage);
1960 }
1961
1962 if (PagePrivate(page))
1963 SetPagePrivate(newpage);
1964 set_page_private(newpage, page_private(page));
1965
1966 migrate_page_copy(newpage, page);
1967
1968 return MIGRATEPAGE_SUCCESS;
1969}
1970#endif
1971
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001972const struct address_space_operations f2fs_dblock_aops = {
1973 .readpage = f2fs_read_data_page,
1974 .readpages = f2fs_read_data_pages,
1975 .writepage = f2fs_write_data_page,
1976 .writepages = f2fs_write_data_pages,
1977 .write_begin = f2fs_write_begin,
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001978 .write_end = f2fs_write_end,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001979 .set_page_dirty = f2fs_set_data_page_dirty,
Chao Yu487261f2015-02-05 17:44:29 +08001980 .invalidatepage = f2fs_invalidate_page,
1981 .releasepage = f2fs_release_page,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001982 .direct_IO = f2fs_direct_IO,
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09001983 .bmap = f2fs_bmap,
Weichao Guo5b7a4872016-09-20 05:03:27 +08001984#ifdef CONFIG_MIGRATION
1985 .migratepage = f2fs_migrate_page,
1986#endif
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001987};