/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a contiguous range of blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

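/*
 * Flush the contiguous block range accumulated in @lb into the
 * temporary inode as a single extent. The journal handle is extended
 * (or restarted) first so that enough credits are available for the
 * insert.
 */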
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we have accumulated are not too high
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If we are unable to extend the journal, restart it
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

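/*
 * Account one logical block mapped at @pblock: extend the range
 * currently tracked in @lb if the block is contiguous with it,
 * otherwise flush that range via finish_range() and start a new one.
 */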
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

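/*
 * Walk a single indirect block, feeding every mapped entry to
 * update_extent_range(); holes simply advance the logical block
 * counter.
 */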
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

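/*
 * Walk a double indirect block, recursing into each mapped indirect
 * block; a hole here skips an entire indirect block's worth of
 * logical blocks.
 */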
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

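/*
 * Walk the triple indirect block, recursing into each mapped double
 * indirect block; a hole here skips max_entries * max_entries logical
 * blocks.
 */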
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

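/*
 * Make sure the handle has enough journal credits to free one block
 * (we touch the superblock, group descriptor, block bitmap and quota);
 * extend the handle or, failing that, restart the journal.
 */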
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

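/*
 * Free each indirect (meta-data) block referenced from a double
 * indirect block, then free the double indirect block itself. The data
 * blocks are kept; they are now referenced by the extent tree.
 */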
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

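/*
 * Free the meta-data blocks below a triple indirect block: each double
 * indirect subtree via free_dind_blocks(), then the triple indirect
 * block itself.
 */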
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

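/*
 * Free the inode's old indirect, double indirect and triple indirect
 * meta-data blocks; @i_data holds the three entries saved from the
 * original inode before its i_data was overwritten.
 */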
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

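/*
 * Splice the extent tree built in tmp_inode into the original inode:
 * copy the i_data across, transfer the i_blocks count, then free the
 * old indirect meta-data blocks. Fails with -EAGAIN if a racing block
 * allocation cleared EXT4_STATE_EXT_MIGRATE in the meantime.
 */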
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
				struct inode *tmp_inode)
{
	int retval;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

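/*
 * Recursively free the extent-tree node referenced by @ix and every
 * index node below it. Only extent meta-data blocks are freed; the
 * data blocks they describe are left untouched.
 */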
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

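/*
 * Convert an indirect-mapped inode to the extent format: allocate a
 * temporary extent-based inode, walk the direct and indirect blocks
 * building equivalent extents in it, then swap the i_data of the two
 * inodes and free the old indirect meta-data blocks. The temporary
 * inode is kept on the orphan list so a crash cleans it up.
 */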
int ext4_ext_migrate(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlinks
		 */
		return retval;

	percpu_down_write(&sbi->s_writepages_rwsem);

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list. We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_unlock;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		goto out_unlock;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migrate. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the rest of the work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out_tmp_inode;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32-bit block addresses, 4 bytes each */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * In the failure case, delete the extent information
		 * of the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_evict_inode() does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out_tmp_inode:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_data[] block array
 * instead
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated, otherwise delayed-allocation blocks may
	 * not be reflected in the extent header and would bypass the checks
	 * below.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	percpu_down_write(&sbi->s_writepages_rwsem);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return ret;
}