/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

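/*
 * Check whether an inode's data is allowed to live inline in its inode
 * block: the inline_data mount option must be set, the inode must not
 * own extra data blocks, and its size must fit within MAX_INLINE_DATA.
 */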
bool f2fs_may_inline(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t nr_blocks;
	loff_t i_size;

	if (!test_opt(sbi, INLINE_DATA))
		return false;

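	/* inode block + at most one data block, plus one more for xattrs */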
	nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
	if (inode->i_blocks > nr_blocks)
		return false;

	i_size = i_size_read(inode);
	if (i_size > MAX_INLINE_DATA)
		return false;

	return true;
}

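/*
 * Fill a page-cache page from the inline data stored in the inode block.
 * Only page index 0 can carry inline data; any other index is zeroed.
 */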
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *ipage;
	void *src_addr, *dst_addr;

	if (page->index) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	f2fs_put_page(ipage, 1);

out:
	SetPageUptodate(page);
	unlock_page(page);

	return 0;
}

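/*
 * Move inline data into a regular data block: reserve block 0, copy the
 * inline contents into @page and write it back, and only clear the
 * inline flag once the data page writeback completes, so the data stays
 * consistent across a crash.
 */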
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

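/*
 * Convert an inline inode to the regular layout when a write would grow
 * it beyond MAX_INLINE_DATA bytes; a no-op for non-inline inodes.
 */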
int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

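/*
 * Copy @size bytes of page 0 back into the inline area of the inode
 * block, setting the inline flag and releasing the first data block if
 * one had been allocated.
 */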
int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}

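/*
 * Reconcile the inline state of @inode with the node page @npage seen
 * during recovery; see the policy table below for the four cases.
 */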
int recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
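		/* nonzero return: inline data recovered, no block recovery needed */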
		return -1;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));
		zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		goto process_inline;
	}
	return 0;
}