/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

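/*
 * Submit the bio currently cached in @io, if any, and clear it.
 * Callers serialize access through io->io_rwsem.
 */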
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

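/*
 * Check whether the cached bio contains a page we care about: a page that
 * belongs to @inode, the exact @page, or a node page of inode @ino.  With
 * all three unset, any cached bio counts as a match.
 */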
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

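/*
 * Flush the cached bio for the given page type when it holds a page of
 * interest.  In the checkpoint path, META is promoted to META_FLUSH and
 * submitted as a (FUA) flush request.
 */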
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
								int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

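/*
 * Merging submission path: the page is appended to the bio cached for its
 * I/O type under io->io_rwsem; the cached bio is flushed first whenever the
 * new block is not contiguous with the last one or the request flags differ.
 */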
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
					dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;

	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

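/*
 * Resolve the block address for @index: take it from the extent cache when
 * possible, otherwise reserve a new block through f2fs_reserve_block().
 */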
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

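/*
 * Grab the page at @index and, if it is not already uptodate, start a read
 * for it.  The read completes asynchronously; callers such as
 * find_data_page() and get_lock_data_page() wait for it as needed.
 */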
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr may remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

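/*
 * Look up a data page without keeping it locked.  If a read had to be
 * issued, wait until the page becomes uptodate before returning it.
 */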
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Return an error if the access hits a hole, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage is released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

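/*
 * Allocate an on-disk block for the current dnode offset and extend i_size
 * when the new block grows the file.
 */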
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
							&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

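/*
 * Preallocate blocks for an upcoming write: direct I/O uses
 * F2FS_GET_BLOCK_PRE_DIO, while buffered writes that no longer fit in
 * inline data use F2FS_GET_BLOCK_PRE_AIO.
 */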
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO through the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
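/*
 * A minimal illustrative call pattern (variable names here are only for
 * illustration; see f2fs_preallocate_blocks() above for a real caller):
 *
 *	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
 *
 *	map.m_lblk = start_pgofs;
 *	map.m_len = nr_blocks;
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
 *
 * On success, map.m_pblk, map.m_len and map.m_flags describe the extent
 * that was resolved starting at map.m_lblk.
 */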
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(F2FS_I(inode),
							FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

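/*
 * Adapter between the buffer_head based get_block_t interface and
 * f2fs_map_blocks().
 */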
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until we pass the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/* A hole beyond isize means there are no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

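/*
 * Write one dirty data page: rewrite the existing block in place (IPU) when
 * SSR-style in-place update is preferred, otherwise write it out of place
 * (OPU) to a newly allocated block.
 */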
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * in-place writes are preferred for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

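/*
 * ->writepage implementation: the tail of a page that straddles i_size is
 * zeroed, dentry pages are written directly (they are controlled by the
 * checkpoint), and everything else goes through do_write_data_page() under
 * f2fs_lock_op().
 */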
1206static int f2fs_write_data_page(struct page *page,
1207 struct writeback_control *wbc)
1208{
1209 struct inode *inode = page->mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001210 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001211 loff_t i_size = i_size_read(inode);
1212 const pgoff_t end_index = ((unsigned long long) i_size)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001213 >> PAGE_SHIFT;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001214 unsigned offset = 0;
Jaegeuk Kim39936832012-11-22 16:21:29 +09001215 bool need_balance_fs = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001216 int err = 0;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001217 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001218 .sbi = sbi,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001219 .type = DATA,
Chris Fries6c311ec2014-01-17 14:44:39 -06001220 .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001221 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001222 .encrypted_page = NULL,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001223 };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001224
Chao Yuecda0de2014-05-06 16:48:26 +08001225 trace_f2fs_writepage(page, DATA);
1226
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001227 if (page->index < end_index)
Jaegeuk Kim39936832012-11-22 16:21:29 +09001228 goto write;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001229
1230 /*
1231 * If the offset is out-of-range of file size,
1232 * this page does not have to be written to disk.
1233 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001234 offset = i_size & (PAGE_SIZE - 1);
Jaegeuk Kim76f60262014-04-15 16:04:15 +09001235 if ((page->index >= end_index + 1) || !offset)
Jaegeuk Kim39936832012-11-22 16:21:29 +09001236 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001237
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001238 zero_user_segment(page, offset, PAGE_SIZE);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001239write:
Chao Yucaf00472015-01-28 17:48:42 +08001240 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001241 goto redirty_out;
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001242 if (f2fs_is_drop_cache(inode))
1243 goto out;
Jaegeuk Kime6e5f562016-04-14 16:48:52 -07001244	/* we should not write the 0'th page, which holds the journal header */
1245 if (f2fs_is_volatile_file(inode) && (!page->index ||
1246 (!wbc->for_reclaim &&
1247 available_free_memory(sbi, BASE_CHECK))))
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001248 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001249
Jaegeuk Kim39936832012-11-22 16:21:29 +09001250 /* Dentry blocks are controlled by checkpoint */
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001251 if (S_ISDIR(inode->i_mode)) {
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001252 if (unlikely(f2fs_cp_error(sbi)))
1253 goto redirty_out;
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001254 err = do_write_data_page(&fio);
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001255 goto done;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001256 }
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001257
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001258	/* we should bypass data pages to let the kworker jobs proceed */
1259 if (unlikely(f2fs_cp_error(sbi))) {
1260 SetPageError(page);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001261 goto out;
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001262 }
1263
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001264 if (!wbc->for_reclaim)
1265 need_balance_fs = true;
1266 else if (has_not_enough_free_secs(sbi, 0))
Jaegeuk Kim39936832012-11-22 16:21:29 +09001267 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001268
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001269 err = -EAGAIN;
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001270 f2fs_lock_op(sbi);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001271 if (f2fs_has_inline_data(inode))
1272 err = f2fs_write_inline_data(inode, page);
1273 if (err == -EAGAIN)
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001274 err = do_write_data_page(&fio);
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001275 f2fs_unlock_op(sbi);
1276done:
1277 if (err && err != -ENOENT)
1278 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001279
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001280 clear_cold_data(page);
Jaegeuk Kim39936832012-11-22 16:21:29 +09001281out:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001282 inode_dec_dirty_pages(inode);
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08001283 if (err)
1284 ClearPageUptodate(page);
Chao Yu0c3a5792016-01-18 18:28:11 +08001285
1286 if (wbc->for_reclaim) {
1287 f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
Chao Yuc227f912015-12-16 13:09:20 +08001288 remove_dirty_inode(inode);
Chao Yueb7e8132015-11-10 18:45:07 +08001289 }
Chao Yu0c3a5792016-01-18 18:28:11 +08001290
1291 unlock_page(page);
1292 f2fs_balance_fs(sbi, need_balance_fs);
1293
1294 if (unlikely(f2fs_cp_error(sbi)))
1295 f2fs_submit_merged_bio(sbi, DATA, WRITE);
1296
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001297 return 0;
1298
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001299redirty_out:
Jaegeuk Kim76f60262014-04-15 16:04:15 +09001300 redirty_page_for_writepage(wbc, page);
Jaegeuk Kim8618b882014-02-17 19:29:27 +09001301 return AOP_WRITEPAGE_ACTIVATE;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001302}
1303
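/*
 * writepage_t callback handed to f2fs_write_cache_pages(): it simply
 * calls the mapping's ->writepage and latches any error into the
 * address_space via mapping_set_error().
 */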
Namjae Jeonfa9150a2013-01-15 16:45:24 +09001304static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
1305 void *data)
1306{
1307 struct address_space *mapping = data;
1308 int ret = mapping->a_ops->writepage(page, wbc);
1309 mapping_set_error(mapping, ret);
1310 return ret;
1311}
1312
Chao Yu8f46dca2015-07-14 18:56:10 +08001313/*
 1314 * This function was copied from write_cache_pages in mm/page-writeback.c.
 1315 * The major change is that cold data pages are written in a step separate
 1316 * from warm/hot data pages.
1317 */
1318static int f2fs_write_cache_pages(struct address_space *mapping,
1319 struct writeback_control *wbc, writepage_t writepage,
1320 void *data)
1321{
1322 int ret = 0;
1323 int done = 0;
1324 struct pagevec pvec;
1325 int nr_pages;
1326 pgoff_t uninitialized_var(writeback_index);
1327 pgoff_t index;
1328 pgoff_t end; /* Inclusive */
1329 pgoff_t done_index;
1330 int cycled;
1331 int range_whole = 0;
1332 int tag;
1333 int step = 0;
1334
1335 pagevec_init(&pvec, 0);
1336next:
1337 if (wbc->range_cyclic) {
1338 writeback_index = mapping->writeback_index; /* prev offset */
1339 index = writeback_index;
1340 if (index == 0)
1341 cycled = 1;
1342 else
1343 cycled = 0;
1344 end = -1;
1345 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001346 index = wbc->range_start >> PAGE_SHIFT;
1347 end = wbc->range_end >> PAGE_SHIFT;
Chao Yu8f46dca2015-07-14 18:56:10 +08001348 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1349 range_whole = 1;
1350 cycled = 1; /* ignore range_cyclic tests */
1351 }
1352 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1353 tag = PAGECACHE_TAG_TOWRITE;
1354 else
1355 tag = PAGECACHE_TAG_DIRTY;
1356retry:
1357 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1358 tag_pages_for_writeback(mapping, index, end);
1359 done_index = index;
1360 while (!done && (index <= end)) {
1361 int i;
1362
1363 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1364 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
1365 if (nr_pages == 0)
1366 break;
1367
1368 for (i = 0; i < nr_pages; i++) {
1369 struct page *page = pvec.pages[i];
1370
1371 if (page->index > end) {
1372 done = 1;
1373 break;
1374 }
1375
1376 done_index = page->index;
1377
1378 lock_page(page);
1379
1380 if (unlikely(page->mapping != mapping)) {
1381continue_unlock:
1382 unlock_page(page);
1383 continue;
1384 }
1385
1386 if (!PageDirty(page)) {
1387 /* someone wrote it for us */
1388 goto continue_unlock;
1389 }
1390
Tiezhu Yang737f1892015-07-17 12:56:00 +08001391 if (step == is_cold_data(page))
Chao Yu8f46dca2015-07-14 18:56:10 +08001392 goto continue_unlock;
1393
1394 if (PageWriteback(page)) {
1395 if (wbc->sync_mode != WB_SYNC_NONE)
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08001396 f2fs_wait_on_page_writeback(page,
1397 DATA, true);
Chao Yu8f46dca2015-07-14 18:56:10 +08001398 else
1399 goto continue_unlock;
1400 }
1401
1402 BUG_ON(PageWriteback(page));
1403 if (!clear_page_dirty_for_io(page))
1404 goto continue_unlock;
1405
1406 ret = (*writepage)(page, wbc, data);
1407 if (unlikely(ret)) {
1408 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1409 unlock_page(page);
1410 ret = 0;
1411 } else {
1412 done_index = page->index + 1;
1413 done = 1;
1414 break;
1415 }
1416 }
1417
1418 if (--wbc->nr_to_write <= 0 &&
1419 wbc->sync_mode == WB_SYNC_NONE) {
1420 done = 1;
1421 break;
1422 }
1423 }
1424 pagevec_release(&pvec);
1425 cond_resched();
1426 }
1427
1428 if (step < 1) {
1429 step++;
1430 goto next;
1431 }
1432
1433 if (!cycled && !done) {
1434 cycled = 1;
1435 index = 0;
1436 end = writeback_index - 1;
1437 goto retry;
1438 }
1439 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1440 mapping->writeback_index = done_index;
1441
1442 return ret;
1443}
1444
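/*
 * ->writepages handler.  As the checks below show, writeback is skipped
 * when it would be pointless or unsafe: when the inode has no dirty pages
 * under WB_SYNC_NONE, for a directory that still has only a few dirty
 * pages while memory is plentiful, during file defragmentation, and
 * during power-on recovery (POR).  WB_SYNC_ALL writeback of regular files
 * is serialized by the sbi->writepages mutex.
 */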
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001445static int f2fs_write_data_pages(struct address_space *mapping,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001446 struct writeback_control *wbc)
1447{
1448 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001449 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim5463e7c2015-04-21 10:40:54 -07001450 bool locked = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001451 int ret;
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001452 long diff;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001453
P J Pcfb185a2013-04-03 11:38:00 +09001454	/* deal with chardevs and other special files */
1455 if (!mapping->a_ops->writepage)
1456 return 0;
1457
Chao Yu6a290542015-07-17 18:02:39 +08001458 /* skip writing if there is no dirty page in this inode */
1459 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
1460 return 0;
1461
Jaegeuk Kima1257022015-10-08 10:40:07 -07001462 if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
1463 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
1464 available_free_memory(sbi, DIRTY_DENTS))
1465 goto skip_write;
1466
Chao Yud323d002015-10-27 09:53:45 +08001467	/* skip writing during file defragmentation */
1468 if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
1469 goto skip_write;
1470
Jaegeuk Kimd5669f72015-02-27 13:37:39 -08001471 /* during POR, we don't need to trigger writepage at all. */
1472 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1473 goto skip_write;
1474
Yunlei Hed31c7c32016-02-04 16:14:00 +08001475 trace_f2fs_writepages(mapping->host, wbc, DATA);
1476
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001477 diff = nr_pages_to_write(sbi, DATA, wbc);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001478
Jaegeuk Kim25c13552016-01-20 23:46:05 +08001479 if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
Jaegeuk Kim5463e7c2015-04-21 10:40:54 -07001480 mutex_lock(&sbi->writepages);
1481 locked = true;
1482 }
Chao Yu8f46dca2015-07-14 18:56:10 +08001483 ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
Chao Yu0c3a5792016-01-18 18:28:11 +08001484 f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
Jaegeuk Kim5463e7c2015-04-21 10:40:54 -07001485 if (locked)
1486 mutex_unlock(&sbi->writepages);
Jaegeuk Kim458e6192013-12-11 13:54:01 +09001487
Chao Yuc227f912015-12-16 13:09:20 +08001488 remove_dirty_inode(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001489
Jaegeuk Kim50c8cdb2014-03-18 13:47:11 +09001490 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001491 return ret;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001492
1493skip_write:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001494 wbc->pages_skipped += get_dirty_pages(inode);
Yunlei Hed31c7c32016-02-04 16:14:00 +08001495 trace_f2fs_writepages(mapping->host, wbc, DATA);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09001496 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001497}
1498
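/*
 * Called when a buffered or direct write fails part way: any page cache
 * and blocks instantiated beyond the old i_size are truncated again so
 * the file is not left with blocks allocated past EOF.
 */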
Chao Yu3aab8f82014-07-02 13:25:04 +08001499static void f2fs_write_failed(struct address_space *mapping, loff_t to)
1500{
1501 struct inode *inode = mapping->host;
Jaegeuk Kim819d9152015-12-28 13:48:11 -08001502 loff_t i_size = i_size_read(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08001503
Jaegeuk Kim819d9152015-12-28 13:48:11 -08001504 if (to > i_size) {
1505 truncate_pagecache(inode, i_size);
1506 truncate_blocks(inode, i_size, true);
Chao Yu3aab8f82014-07-02 13:25:04 +08001507 }
1508}
1509
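/*
 * Helper for f2fs_write_begin(): looks up the on-disk block behind the
 * page and reports whether a node block was dirtied.  A full-page
 * overwrite of a plain, unencrypted file needs no lookup at all.
 * Inline-data inodes either keep the write in the inline area or are
 * converted to the regular layout.  Otherwise the block address comes
 * from the extent cache or a dnode lookup; writes into a hole or past
 * i_size allocate the block under f2fs_lock_op().
 */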
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001510static int prepare_write_begin(struct f2fs_sb_info *sbi,
1511 struct page *page, loff_t pos, unsigned len,
1512 block_t *blk_addr, bool *node_changed)
1513{
1514 struct inode *inode = page->mapping->host;
1515 pgoff_t index = page->index;
1516 struct dnode_of_data dn;
1517 struct page *ipage;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001518 bool locked = false;
1519 struct extent_info ei;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001520 int err = 0;
1521
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001522 /*
1523 * we already allocated all the blocks, so we don't need to get
1524 * the block addresses when there is no need to fill the page.
1525 */
1526 if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001527 len == PAGE_SIZE)
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001528 return 0;
1529
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001530 if (f2fs_has_inline_data(inode) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001531 (pos & PAGE_MASK) >= i_size_read(inode)) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001532 f2fs_lock_op(sbi);
1533 locked = true;
1534 }
1535restart:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001536 /* check inline_data */
1537 ipage = get_node_page(sbi, inode->i_ino);
1538 if (IS_ERR(ipage)) {
1539 err = PTR_ERR(ipage);
1540 goto unlock_out;
1541 }
1542
1543 set_new_dnode(&dn, inode, ipage, ipage, 0);
1544
1545 if (f2fs_has_inline_data(inode)) {
1546 if (pos + len <= MAX_INLINE_DATA) {
1547 read_inline_data(page, ipage);
1548 set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
Jaegeuk Kim2049d4f2016-01-25 05:57:05 -08001549 set_inline_node(ipage);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001550 } else {
1551 err = f2fs_convert_inline_page(&dn, page);
1552 if (err)
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001553 goto out;
1554 if (dn.data_blkaddr == NULL_ADDR)
1555 err = f2fs_get_block(&dn, index);
1556 }
1557 } else if (locked) {
1558 err = f2fs_get_block(&dn, index);
1559 } else {
1560 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1561 dn.data_blkaddr = ei.blk + index - ei.fofs;
1562 } else {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001563 /* hole case */
1564 err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim4da7bf52016-04-06 11:27:03 -07001565 if (err || dn.data_blkaddr == NULL_ADDR) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001566 f2fs_put_dnode(&dn);
1567 f2fs_lock_op(sbi);
1568 locked = true;
1569 goto restart;
1570 }
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001571 }
1572 }
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001573
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001574 /* convert_inline_page can make node_changed */
1575 *blk_addr = dn.data_blkaddr;
1576 *node_changed = dn.node_changed;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001577out:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001578 f2fs_put_dnode(&dn);
1579unlock_out:
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08001580 if (locked)
1581 f2fs_unlock_op(sbi);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001582 return err;
1583}
1584
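/*
 * ->write_begin handler.  Writes to any page other than #0 force inline
 * data to be converted first, which avoids the lock inversion between
 * page #0 and the inode page described in the comment below.  After
 * prepare_write_begin() the page is brought up to date for a partial
 * write: new blocks are zeroed, existing blocks are read in with a
 * synchronous bio and, for encrypted regular files, decrypted in place.
 */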
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001585static int f2fs_write_begin(struct file *file, struct address_space *mapping,
1586 loff_t pos, unsigned len, unsigned flags,
1587 struct page **pagep, void **fsdata)
1588{
1589 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07001590 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001591 struct page *page = NULL;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001592 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001593 bool need_balance = false;
1594 block_t blkaddr = NULL_ADDR;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001595 int err = 0;
1596
Chao Yu62aed042014-05-06 16:46:04 +08001597 trace_f2fs_write_begin(inode, pos, len, flags);
1598
Jaegeuk Kim5f727392014-11-25 10:59:45 -08001599 /*
1600 * We should check this at this moment to avoid deadlock on inode page
1601 * and #0 page. The locking rule for inline_data conversion should be:
1602 * lock_page(page #0) -> lock_page(inode_page)
1603 */
1604 if (index != 0) {
1605 err = f2fs_convert_inline_inode(inode);
1606 if (err)
1607 goto fail;
1608 }
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001609repeat:
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001610 page = grab_cache_page_write_begin(mapping, index, flags);
Chao Yu3aab8f82014-07-02 13:25:04 +08001611 if (!page) {
1612 err = -ENOMEM;
1613 goto fail;
1614 }
Jaegeuk Kimd5f66992014-04-30 09:22:45 +09001615
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001616 *pagep = page;
1617
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001618 err = prepare_write_begin(sbi, page, pos, len,
1619 &blkaddr, &need_balance);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001620 if (err)
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001621 goto fail;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001622
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001623 if (need_balance && has_not_enough_free_secs(sbi, 0)) {
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001624 unlock_page(page);
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08001625 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001626 lock_page(page);
1627 if (page->mapping != mapping) {
1628 /* The page got truncated from under us */
1629 f2fs_put_page(page, 1);
1630 goto repeat;
1631 }
1632 }
1633
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08001634 f2fs_wait_on_page_writeback(page, DATA, false);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001635
Chao Yu08b39fb2015-10-08 13:27:34 +08001636 /* wait for GCed encrypted page writeback */
1637 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001638 f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
Chao Yu08b39fb2015-10-08 13:27:34 +08001639
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001640 if (len == PAGE_SIZE)
Chao Yu90d43882015-07-08 18:24:38 +08001641 goto out_update;
1642 if (PageUptodate(page))
1643 goto out_clear;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001644
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001645 if ((pos & PAGE_MASK) >= i_size_read(inode)) {
1646 unsigned start = pos & (PAGE_SIZE - 1);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001647 unsigned end = start + len;
1648
1649 /* Reading beyond i_size is simple: memset to zero */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001650 zero_user_segments(page, 0, start, end, PAGE_SIZE);
Chao Yu90d43882015-07-08 18:24:38 +08001651 goto out_update;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001652 }
1653
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08001654 if (blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001655 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001656 } else {
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001657 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001658 .sbi = sbi,
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001659 .type = DATA,
1660 .rw = READ_SYNC,
Chao Yu7a9d7542016-02-22 18:36:38 +08001661 .old_blkaddr = blkaddr,
1662 .new_blkaddr = blkaddr,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001663 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001664 .encrypted_page = NULL,
Jaegeuk Kimcf04e8e2014-12-17 19:33:13 -08001665 };
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07001666 err = f2fs_submit_page_bio(&fio);
Jan Kara9234f312014-10-22 15:21:47 +02001667 if (err)
1668 goto fail;
Chao Yud54c7952014-03-29 15:30:40 +08001669
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001670 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001671 if (unlikely(!PageUptodate(page))) {
Chao Yu3aab8f82014-07-02 13:25:04 +08001672 err = -EIO;
1673 goto fail;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001674 }
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001675 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001676 f2fs_put_page(page, 1);
1677 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001678 }
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001679
1680 /* avoid symlink page */
1681 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
Jaegeuk Kim0b81d072015-05-15 16:26:10 -07001682 err = fscrypt_decrypt_page(page);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001683 if (err)
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001684 goto fail;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001685 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001686 }
Chao Yu90d43882015-07-08 18:24:38 +08001687out_update:
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001688 SetPageUptodate(page);
Chao Yu90d43882015-07-08 18:24:38 +08001689out_clear:
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001690 clear_cold_data(page);
1691 return 0;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07001692
Chao Yu3aab8f82014-07-02 13:25:04 +08001693fail:
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001694 f2fs_put_page(page, 1);
Chao Yu3aab8f82014-07-02 13:25:04 +08001695 f2fs_write_failed(mapping, pos + len);
1696 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001697}
1698
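/*
 * ->write_end handler: the copied data only needs to be marked dirty
 * here; i_size is extended (and the inode marked dirty) when the write
 * went past the old end of file.
 */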
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001699static int f2fs_write_end(struct file *file,
1700 struct address_space *mapping,
1701 loff_t pos, unsigned len, unsigned copied,
1702 struct page *page, void *fsdata)
1703{
1704 struct inode *inode = page->mapping->host;
1705
Chao Yudfb2bf32014-05-06 16:47:23 +08001706 trace_f2fs_write_end(inode, pos, len, copied);
1707
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001708 set_page_dirty(page);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001709
1710 if (pos + copied > i_size_read(inode)) {
1711 i_size_write(inode, pos + copied);
1712 mark_inode_dirty(inode);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001713 }
1714
Chao Yu75c3c8b2013-11-16 14:15:59 +08001715 f2fs_put_page(page, 1);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08001716 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001717 return copied;
1718}
1719
Omar Sandoval6f673762015-03-16 04:33:52 -07001720static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
1721 loff_t offset)
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001722{
1723 unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001724
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001725 if (offset & blocksize_mask)
1726 return -EINVAL;
1727
Al Viro5b46f252014-03-16 18:07:34 -04001728 if (iov_iter_alignment(iter) & blocksize_mask)
1729 return -EINVAL;
1730
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001731 return 0;
1732}
1733
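/*
 * check_direct_IO() above rejects misaligned O_DIRECT requests.  Worked
 * example, assuming the usual 4KB block size: blocksize_mask is 0xfff,
 * so a request at offset 8192 with 4KB-aligned iovecs is accepted, while
 * one at offset 8704 (8192 + 512) fails with -EINVAL.
 *
 * f2fs_direct_IO() itself falls back to buffered I/O for encrypted
 * regular files (by returning 0) and otherwise hands the request to
 * blockdev_direct_IO() with get_data_block_dio() as the block mapper.
 * A failed direct write gives back any blocks it may have allocated via
 * f2fs_write_failed().
 */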
Omar Sandoval22c61862015-03-16 04:33:53 -07001734static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
1735 loff_t offset)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001736{
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001737 struct address_space *mapping = iocb->ki_filp->f_mapping;
Chao Yu3aab8f82014-07-02 13:25:04 +08001738 struct inode *inode = mapping->host;
1739 size_t count = iov_iter_count(iter);
1740 int err;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09001741
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001742 err = check_direct_IO(inode, iter, offset);
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08001743 if (err)
1744 return err;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08001745
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001746 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
1747 return 0;
1748
Omar Sandoval6f673762015-03-16 04:33:52 -07001749 trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
Chao Yu70407fa2014-07-31 21:11:22 +08001750
Chao Yue2b4e2b2015-08-19 19:11:19 +08001751 err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
Jaegeuk Kim6bfc4912016-04-18 17:07:44 -04001752 if (iov_iter_rw(iter) == WRITE) {
1753 if (err > 0)
1754 set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
1755 else if (err < 0)
1756 f2fs_write_failed(mapping, offset + count);
1757 }
Chao Yu70407fa2014-07-31 21:11:22 +08001758
Omar Sandoval6f673762015-03-16 04:33:52 -07001759 trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
Chao Yu70407fa2014-07-31 21:11:22 +08001760
Chao Yu3aab8f82014-07-02 13:25:04 +08001761 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001762}
1763
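/*
 * ->invalidatepage handler: when a page is thrown away, its contribution
 * to the dirty meta/node/data page counters is dropped and its private
 * state is cleared, except for pages of an in-flight atomic write, which
 * must keep PagePrivate.
 */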
Chao Yu487261f2015-02-05 17:44:29 +08001764void f2fs_invalidate_page(struct page *page, unsigned int offset,
1765 unsigned int length)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001766{
1767 struct inode *inode = page->mapping->host;
Chao Yu487261f2015-02-05 17:44:29 +08001768 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001769
Chao Yu487261f2015-02-05 17:44:29 +08001770 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001771 (offset % PAGE_SIZE || length != PAGE_SIZE))
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001772 return;
1773
Chao Yu487261f2015-02-05 17:44:29 +08001774 if (PageDirty(page)) {
1775 if (inode->i_ino == F2FS_META_INO(sbi))
1776 dec_page_count(sbi, F2FS_DIRTY_META);
1777 else if (inode->i_ino == F2FS_NODE_INO(sbi))
1778 dec_page_count(sbi, F2FS_DIRTY_NODES);
1779 else
1780 inode_dec_dirty_pages(inode);
1781 }
Chao Yudecd36b2015-08-07 18:42:09 +08001782
 1783	/* This is an atomic written page, so keep it Private */
1784 if (IS_ATOMIC_WRITTEN_PAGE(page))
1785 return;
1786
Chao Yu23dc9742016-04-29 20:09:15 +08001787 set_page_private(page, 0);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001788 ClearPagePrivate(page);
1789}
1790
Chao Yu487261f2015-02-05 17:44:29 +08001791int f2fs_release_page(struct page *page, gfp_t wait)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001792{
Jaegeuk Kimf68daee2015-01-30 11:39:08 -08001793	/* If this is a dirty page, keep PagePrivate */
1794 if (PageDirty(page))
1795 return 0;
1796
Chao Yudecd36b2015-08-07 18:42:09 +08001797	/* This is an atomic written page, so keep it Private */
1798 if (IS_ATOMIC_WRITTEN_PAGE(page))
1799 return 0;
1800
Chao Yu23dc9742016-04-29 20:09:15 +08001801 set_page_private(page, 0);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001802 ClearPagePrivate(page);
Jaegeuk Kimc3850aa2013-03-14 09:24:32 +09001803 return 1;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001804}
1805
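/*
 * ->set_page_dirty handler.  Pages of a file opened for atomic writes
 * are not dirtied in the usual way: they are registered on the inode's
 * in-memory page list via register_inmem_page() instead.  Everything
 * else goes through __set_page_dirty_nobuffers() plus f2fs's own
 * dirty-page accounting.
 */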
1806static int f2fs_set_data_page_dirty(struct page *page)
1807{
1808 struct address_space *mapping = page->mapping;
1809 struct inode *inode = mapping->host;
1810
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09001811 trace_f2fs_set_page_dirty(page, DATA);
1812
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001813 SetPageUptodate(page);
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001814
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001815 if (f2fs_is_atomic_file(inode)) {
Chao Yudecd36b2015-08-07 18:42:09 +08001816 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
1817 register_inmem_page(inode, page);
1818 return 1;
1819 }
1820 /*
 1821	 * This page has already been registered, so we just
 1822	 * return here.
1823 */
1824 return 0;
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07001825 }
1826
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001827 if (!PageDirty(page)) {
1828 __set_page_dirty_nobuffers(page);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07001829 update_dirty_page(inode, page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001830 return 1;
1831 }
1832 return 0;
1833}
1834
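/*
 * ->bmap handler.  Inline-data files have no block mapping to report,
 * so 0 is returned for them.  For everything else, dirty pages are
 * flushed first so that every block is actually allocated before
 * generic_block_bmap() walks the mapping with get_data_block_bmap().
 */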
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09001835static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
1836{
Chao Yu454ae7e2014-04-22 13:34:01 +08001837 struct inode *inode = mapping->host;
1838
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07001839 if (f2fs_has_inline_data(inode))
1840 return 0;
1841
1842 /* make sure allocating whole blocks */
1843 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1844 filemap_write_and_wait(mapping);
1845
Chao Yue2b4e2b2015-08-19 19:11:19 +08001846 return generic_block_bmap(mapping, block, get_data_block_bmap);
Chao Yu429511c2015-02-05 17:54:31 +08001847}
1848
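/*
 * Address space operations for regular data pages; node and meta pages
 * have their own operation tables elsewhere in f2fs.
 */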
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001849const struct address_space_operations f2fs_dblock_aops = {
1850 .readpage = f2fs_read_data_page,
1851 .readpages = f2fs_read_data_pages,
1852 .writepage = f2fs_write_data_page,
1853 .writepages = f2fs_write_data_pages,
1854 .write_begin = f2fs_write_begin,
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09001855 .write_end = f2fs_write_end,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001856 .set_page_dirty = f2fs_set_data_page_dirty,
Chao Yu487261f2015-02-05 17:44:29 +08001857 .invalidatepage = f2fs_invalidate_page,
1858 .releasepage = f2fs_release_page,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001859 .direct_IO = f2fs_direct_IO,
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09001860 .bmap = f2fs_bmap,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001861};