// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of the contiguous blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

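/*
 * finish_range() writes the run of contiguous blocks collected in @lb
 * out as a single extent on the (temporary) extent-mapped inode,
 * extending or restarting the journal handle as needed.
 */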
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent. Since
	 * we are doing this in a loop we may accumulate extra credits,
	 * but below we try not to accumulate too many of them by
	 * restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we have accumulated are not really high.
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If not able to extend the journal, restart it.
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

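/*
 * update_extent_range() accounts one logical block mapped at physical
 * block @pblock: either it extends the contiguous range tracked in @lb,
 * or it flushes that range via finish_range() and starts a new one.
 */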
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

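/*
 * update_ind_extent_range() walks one indirect block and feeds every
 * mapped block into update_extent_range(); holes only advance the
 * current logical block number.
 */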
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

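/*
 * update_dind_extent_range() does the same for a double indirect
 * block, recursing into each indirect block it references.
 */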
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

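/*
 * update_tind_extent_range() does the same for a triple indirect
 * block, recursing into each double indirect block it references.
 */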
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

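/*
 * extend_credit_for_blkdel() makes sure the handle has enough credits
 * to free a single block, extending or restarting the journal if not.
 */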
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. While doing so we touch the
	 * superblock, group descriptor and block bitmap, so
	 * allocate a credit of 3. We may also update quota
	 * (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

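/*
 * free_dind_blocks() frees the indirect blocks referenced from the
 * double indirect block @i_data and then the double indirect block
 * itself. Only this metadata is freed; the data blocks it used to
 * map stay in place (they are now owned by the extent tree).
 */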
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

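/*
 * free_tind_blocks() frees the metadata below a triple indirect
 * block: each double indirect subtree via free_dind_blocks(), then
 * the triple indirect block itself.
 */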
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

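/*
 * free_ind_block() frees the indirect, double indirect and triple
 * indirect metadata blocks recorded in @i_data, a copy of the
 * original inode's top-level block pointers.
 */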
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

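/*
 * ext4_ext_swap_inode_data() moves the extent tree built in tmp_inode
 * over to the original inode and frees the original inode's now
 * unused indirect metadata blocks. Fails with -EAGAIN if a racing
 * block allocation cleared EXT4_STATE_EXT_MIGRATE in the meantime.
 */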
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty after this, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

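/*
 * free_ext_idx() frees the extent index block pointed to by @ix,
 * first recursing into any lower index levels it references.
 */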
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = ext4_sb_bread(inode->i_sb, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent metadata blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent metadata
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

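/*
 * ext4_ext_migrate() converts an indirect-block-mapped inode to use
 * extents: the extent tree is built in a temporary inode, and on
 * success its i_data is swapped into the original inode.
 */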
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list. We do
	 * need to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migrate. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the other work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp inode.
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that ext4_evict_inode() does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed-allocated blocks may not
	 * be reflected in the extent tree and would bypass the checks on the
	 * extent header below.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}