/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
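
/*
 * Illustrative example of the lo/hi split used by the four helpers above
 * (the value is hypothetical): for a 48-bit physical block
 * pb = 0x0123456789ab,
 *
 *	pb & 0xffffffff  = 0x456789ab  -> stored in the 32-bit *_lo field
 *	(pb >> 31) >> 1  = 0x0123      -> stored in the 16-bit *_hi field
 *
 * and ext_pblock()/idx_pblock() recombine the two halves:
 *
 *	0x456789ab | (((ext4_fsblk_t) 0x0123 << 31) << 1) = 0x0123456789ab
 */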

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem, so someone might have re-cached
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
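
/*
 * Example of the flex-group logic above (numbers are hypothetical): with
 * flex_size = 16, a regular file whose inode lives in block group 37 has
 * block_group &= ~15 round the goal down to group 32 (the first group of
 * its flexgroup), and the S_ISREG() increment then moves it to group 33,
 * leaving group 32 for directories and special files.
 */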

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
{
	int lcap, icap, rcap, leafs, idxs, num;
	int newextents = blocks;

	rcap = ext4_ext_space_root_idx(inode, 0);
	lcap = ext4_ext_space_block(inode, 0);
	icap = ext4_ext_space_block_idx(inode, 0);

	/* number of new leaf blocks needed */
	num = leafs = (newextents + lcap - 1) / lcap;

	/*
	 * Worst case: we need separate index block(s)
	 * to link all new leaf blocks
	 */
	idxs = (leafs + icap - 1) / icap;
	do {
		num += idxs;
		idxs = (idxs + icap - 1) / icap;
	} while (idxs > rcap);

	return num;
}
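
/*
 * Worked example for the calculation above (assuming a 4KB block size):
 * extent and index entries are 12 bytes each and the header is 12 bytes,
 * so lcap = icap = (4096 - 12) / 12 = 340 and rcap = 4 (the in-inode root
 * holds four index entries).  For blocks = 1000 new extents:
 * leafs = ceil(1000/340) = 3 leaf blocks, plus idxs = ceil(3/340) = 1
 * index block, which already fits under the root, so 3 + 1 = 4 metadata
 * blocks are returned.
 */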

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
					le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
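
/*
 * Example of the binary-search invariant above (hypothetical leaf): with
 * extents starting at logical blocks 0, 100 and 200, a search for block
 * 150 leaves path->p_ext pointing at the extent that starts at 100, i.e.
 * the last entry whose ee_block is <= the requested block.  The caller
 * still has to check whether the target falls inside that extent's length.
 */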

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
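
/*
 * Sketch of a typical call sequence for ext4_ext_find_extent() (purely
 * illustrative; the local variable names are assumptions): look up the
 * path for a logical block, use path[depth].p_ext (which may be NULL for
 * an empty leaf), then release the buffer references:
 *
 *	path = ext4_ext_find_extent(inode, block, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	depth = ext_depth(inode);
 *	ex = path[depth].p_ext;
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */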

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will remain in a consistent state.  The next mount will
	 * repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext - m,
			sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
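
/*
 * Sketch of the growth step performed above: the root node stored in
 * i_data is copied into a newly allocated block B, and the root is then
 * rewritten as a single index entry pointing at B, so the tree becomes
 * one level deeper:
 *
 *	before:  root [e1 e2 e3 e4]
 *	after:   root [idx -> B],  B [e1 e2 e3 e4]
 */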
1106
1107/*
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001108 * ext4_ext_create_new_leaf:
1109 * finds empty index and adds new leaf.
1110 * if no free index is found, then it requests in-depth growing.
Alex Tomasa86c6182006-10-11 01:21:03 -07001111 */
1112static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1113 struct ext4_ext_path *path,
1114 struct ext4_extent *newext)
1115{
1116 struct ext4_ext_path *curp;
1117 int depth, i, err = 0;
1118
1119repeat:
1120 i = depth = ext_depth(inode);
1121
1122 /* walk up to the tree and look for free index entry */
1123 curp = path + depth;
1124 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1125 i--;
1126 curp--;
1127 }
1128
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001129 /* we use already allocated block for index block,
1130 * so subsequent data blocks should be contiguous */
Alex Tomasa86c6182006-10-11 01:21:03 -07001131 if (EXT_HAS_FREE_INDEX(curp)) {
1132 /* if we found index with free entry, then use that
1133 * entry: create all needed subtree and add new leaf */
1134 err = ext4_ext_split(handle, inode, path, newext, i);
Shen Feng787e0982008-07-11 19:27:31 -04001135 if (err)
1136 goto out;
Alex Tomasa86c6182006-10-11 01:21:03 -07001137
1138 /* refill path */
1139 ext4_ext_drop_refs(path);
1140 path = ext4_ext_find_extent(inode,
Aneesh Kumar K.V725d26d2008-01-28 23:58:27 -05001141 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1142 path);
Alex Tomasa86c6182006-10-11 01:21:03 -07001143 if (IS_ERR(path))
1144 err = PTR_ERR(path);
1145 } else {
1146 /* tree is full, time to grow in depth */
1147 err = ext4_ext_grow_indepth(handle, inode, path, newext);
1148 if (err)
1149 goto out;
1150
1151 /* refill path */
1152 ext4_ext_drop_refs(path);
1153 path = ext4_ext_find_extent(inode,
Aneesh Kumar K.V725d26d2008-01-28 23:58:27 -05001154 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1155 path);
Alex Tomasa86c6182006-10-11 01:21:03 -07001156 if (IS_ERR(path)) {
1157 err = PTR_ERR(path);
1158 goto out;
1159 }
1160
1161 /*
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001162 * only first (depth 0 -> 1) produces free space;
1163 * in all other cases we have to split the grown tree
Alex Tomasa86c6182006-10-11 01:21:03 -07001164 */
1165 depth = ext_depth(inode);
1166 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001167 /* now we need to split */
Alex Tomasa86c6182006-10-11 01:21:03 -07001168 goto repeat;
1169 }
1170 }
1171
1172out:
1173 return err;
1174}
1175
1176/*
Alex Tomas1988b512008-01-28 23:58:27 -05001177 * search the closest allocated block to the left for *logical
1178 * and returns it at @logical + it's physical address at @phys
1179 * if *logical is the smallest allocated block, the function
1180 * returns 0 at @phys
1181 * return value contains 0 (success) or error code
1182 */
1183int
1184ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
1185 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1186{
1187 struct ext4_extent_idx *ix;
1188 struct ext4_extent *ex;
Aneesh Kumar K.Vb939e372008-01-28 23:58:27 -05001189 int depth, ee_len;
Alex Tomas1988b512008-01-28 23:58:27 -05001190
1191 BUG_ON(path == NULL);
1192 depth = path->p_depth;
1193 *phys = 0;
1194
1195 if (depth == 0 && path->p_ext == NULL)
1196 return 0;
1197
1198 /* usually extent in the path covers blocks smaller
1199 * then *logical, but it can be that extent is the
1200 * first one in the file */
1201
1202 ex = path[depth].p_ext;
Aneesh Kumar K.Vb939e372008-01-28 23:58:27 -05001203 ee_len = ext4_ext_get_actual_len(ex);
Alex Tomas1988b512008-01-28 23:58:27 -05001204 if (*logical < le32_to_cpu(ex->ee_block)) {
1205 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
1206 while (--depth >= 0) {
1207 ix = path[depth].p_idx;
1208 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
1209 }
1210 return 0;
1211 }
1212
Aneesh Kumar K.Vb939e372008-01-28 23:58:27 -05001213 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
Alex Tomas1988b512008-01-28 23:58:27 -05001214
Aneesh Kumar K.Vb939e372008-01-28 23:58:27 -05001215 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1216 *phys = ext_pblock(ex) + ee_len - 1;
Alex Tomas1988b512008-01-28 23:58:27 -05001217 return 0;
1218}
1219
1220/*
1221 * search the closest allocated block to the right for *logical
1222 * and returns it at @logical + it's physical address at @phys
1223 * if *logical is the smallest allocated block, the function
1224 * returns 0 at @phys
1225 * return value contains 0 (success) or error code
1226 */
1227int
1228ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
1229 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1230{
1231 struct buffer_head *bh = NULL;
1232 struct ext4_extent_header *eh;
1233 struct ext4_extent_idx *ix;
1234 struct ext4_extent *ex;
1235 ext4_fsblk_t block;
Eric Sandeen395a87b2009-03-10 18:18:47 -04001236 int depth; /* Note, NOT eh_depth; depth from top of tree */
1237 int ee_len;
Alex Tomas1988b512008-01-28 23:58:27 -05001238
1239 BUG_ON(path == NULL);
1240 depth = path->p_depth;
1241 *phys = 0;
1242
1243 if (depth == 0 && path->p_ext == NULL)
1244 return 0;
1245
1246 /* usually extent in the path covers blocks smaller
1247 * then *logical, but it can be that extent is the
1248 * first one in the file */
1249
1250 ex = path[depth].p_ext;
Aneesh Kumar K.Vb939e372008-01-28 23:58:27 -05001251 ee_len = ext4_ext_get_actual_len(ex);
Alex Tomas1988b512008-01-28 23:58:27 -05001252 if (*logical < le32_to_cpu(ex->ee_block)) {
1253 BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
1254 while (--depth >= 0) {
1255 ix = path[depth].p_idx;
1256 BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
1257 }
1258 *logical = le32_to_cpu(ex->ee_block);
1259 *phys = ext_pblock(ex);
1260 return 0;
1261 }
1262
Aneesh Kumar K.Vb939e372008-01-28 23:58:27 -05001263 BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
Alex Tomas1988b512008-01-28 23:58:27 -05001264
1265 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1266 /* next allocated block in this leaf */
1267 ex++;
1268 *logical = le32_to_cpu(ex->ee_block);
1269 *phys = ext_pblock(ex);
1270 return 0;
1271 }
1272
1273 /* go up and search for index to the right */
1274 while (--depth >= 0) {
1275 ix = path[depth].p_idx;
1276 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
Wu Fengguang25f1ee32008-11-25 17:24:23 -05001277 goto got_index;
Alex Tomas1988b512008-01-28 23:58:27 -05001278 }
1279
Wu Fengguang25f1ee32008-11-25 17:24:23 -05001280 /* we've gone up to the root and found no index to the right */
1281 return 0;
Alex Tomas1988b512008-01-28 23:58:27 -05001282
Wu Fengguang25f1ee32008-11-25 17:24:23 -05001283got_index:
Alex Tomas1988b512008-01-28 23:58:27 -05001284 /* we've found index to the right, let's
1285 * follow it and find the closest allocated
1286 * block to the right */
1287 ix++;
1288 block = idx_pblock(ix);
1289 while (++depth < path->p_depth) {
1290 bh = sb_bread(inode->i_sb, block);
1291 if (bh == NULL)
1292 return -EIO;
1293 eh = ext_block_hdr(bh);
Eric Sandeen395a87b2009-03-10 18:18:47 -04001294 /* subtract from p_depth to get proper eh_depth */
Aneesh Kumar K.V56b19862009-03-12 09:51:20 -04001295 if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
Alex Tomas1988b512008-01-28 23:58:27 -05001296 put_bh(bh);
1297 return -EIO;
1298 }
1299 ix = EXT_FIRST_INDEX(eh);
1300 block = idx_pblock(ix);
1301 put_bh(bh);
1302 }
1303
1304 bh = sb_bread(inode->i_sb, block);
1305 if (bh == NULL)
1306 return -EIO;
1307 eh = ext_block_hdr(bh);
Aneesh Kumar K.V56b19862009-03-12 09:51:20 -04001308 if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
Alex Tomas1988b512008-01-28 23:58:27 -05001309 put_bh(bh);
1310 return -EIO;
1311 }
1312 ex = EXT_FIRST_EXTENT(eh);
1313 *logical = le32_to_cpu(ex->ee_block);
1314 *phys = ext_pblock(ex);
1315 put_bh(bh);
1316 return 0;
Alex Tomas1988b512008-01-28 23:58:27 -05001317}
1318
1319/*
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001320 * ext4_ext_next_allocated_block:
1321 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
1322 * NOTE: it considers block number from index entry as
1323 * allocated block. Thus, index entries have to be consistent
1324 * with leaves.
Alex Tomasa86c6182006-10-11 01:21:03 -07001325 */
Aneesh Kumar K.V725d26d2008-01-28 23:58:27 -05001326static ext4_lblk_t
Alex Tomasa86c6182006-10-11 01:21:03 -07001327ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1328{
1329 int depth;
1330
1331 BUG_ON(path == NULL);
1332 depth = path->p_depth;
1333
1334 if (depth == 0 && path->p_ext == NULL)
1335 return EXT_MAX_BLOCK;
1336
1337 while (depth >= 0) {
1338 if (depth == path->p_depth) {
1339 /* leaf */
1340 if (path[depth].p_ext !=
1341 EXT_LAST_EXTENT(path[depth].p_hdr))
1342 return le32_to_cpu(path[depth].p_ext[1].ee_block);
1343 } else {
1344 /* index */
1345 if (path[depth].p_idx !=
1346 EXT_LAST_INDEX(path[depth].p_hdr))
1347 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1348 }
1349 depth--;
1350 }
1351
1352 return EXT_MAX_BLOCK;
1353}
1354
1355/*
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001356 * ext4_ext_next_leaf_block:
Alex Tomasa86c6182006-10-11 01:21:03 -07001357 * returns first allocated block from next leaf or EXT_MAX_BLOCK
1358 */
Aneesh Kumar K.V725d26d2008-01-28 23:58:27 -05001359static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
Andrew Morton63f57932006-10-11 01:21:24 -07001360 struct ext4_ext_path *path)
Alex Tomasa86c6182006-10-11 01:21:03 -07001361{
1362 int depth;
1363
1364 BUG_ON(path == NULL);
1365 depth = path->p_depth;
1366
1367 /* zero-tree has no leaf blocks at all */
1368 if (depth == 0)
1369 return EXT_MAX_BLOCK;
1370
1371 /* go to index block */
1372 depth--;
1373
1374 while (depth >= 0) {
1375 if (path[depth].p_idx !=
1376 EXT_LAST_INDEX(path[depth].p_hdr))
Aneesh Kumar K.V725d26d2008-01-28 23:58:27 -05001377 return (ext4_lblk_t)
1378 le32_to_cpu(path[depth].p_idx[1].ei_block);
Alex Tomasa86c6182006-10-11 01:21:03 -07001379 depth--;
1380 }
1381
1382 return EXT_MAX_BLOCK;
1383}
1384
1385/*
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001386 * ext4_ext_correct_indexes:
1387 * if leaf gets modified and modified extent is first in the leaf,
1388 * then we have to correct all indexes above.
Alex Tomasa86c6182006-10-11 01:21:03 -07001389 * TODO: do we need to correct tree in all cases?
1390 */
Aneesh Kumar K.V1d03ec92008-01-28 23:58:27 -05001391static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
Alex Tomasa86c6182006-10-11 01:21:03 -07001392 struct ext4_ext_path *path)
1393{
1394 struct ext4_extent_header *eh;
1395 int depth = ext_depth(inode);
1396 struct ext4_extent *ex;
1397 __le32 border;
1398 int k, err = 0;
1399
1400 eh = path[depth].p_hdr;
1401 ex = path[depth].p_ext;
1402 BUG_ON(ex == NULL);
1403 BUG_ON(eh == NULL);
1404
1405 if (depth == 0) {
1406 /* there is no tree at all */
1407 return 0;
1408 }
1409
1410 if (ex != EXT_FIRST_EXTENT(eh)) {
1411 /* we correct tree if first leaf got modified only */
1412 return 0;
1413 }
1414
1415 /*
Randy Dunlapd0d856e2006-10-11 01:21:07 -07001416 * TODO: we need correction if border is smaller than current one
Alex Tomasa86c6182006-10-11 01:21:03 -07001417 */
1418 k = depth - 1;
1419 border = path[depth].p_ext->ee_block;
Avantika Mathur7e028972006-12-06 20:41:33 -08001420 err = ext4_ext_get_access(handle, inode, path + k);
1421 if (err)
Alex Tomasa86c6182006-10-11 01:21:03 -07001422 return err;
1423 path[k].p_idx->ei_block = border;
Avantika Mathur7e028972006-12-06 20:41:33 -08001424 err = ext4_ext_dirty(handle, inode, path + k);
1425 if (err)
Alex Tomasa86c6182006-10-11 01:21:03 -07001426 return err;
1427
1428 while (k--) {
1429 /* change all left-side indexes */
1430 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1431 break;
Avantika Mathur7e028972006-12-06 20:41:33 -08001432 err = ext4_ext_get_access(handle, inode, path + k);
1433 if (err)
Alex Tomasa86c6182006-10-11 01:21:03 -07001434 break;
1435 path[k].p_idx->ei_block = border;
Avantika Mathur7e028972006-12-06 20:41:33 -08001436 err = ext4_ext_dirty(handle, inode, path + k);
1437 if (err)
Alex Tomasa86c6182006-10-11 01:21:03 -07001438 break;
1439 }
1440
1441 return err;
1442}
1443
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
			   struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
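
/*
 * Illustrative example (made-up numbers, not driver state): an extent
 * covering logical blocks 100..107 at physical block 500 and one
 * covering 108..115 at physical block 508 are both logically and
 * physically contiguous, so the helper above reports them mergeable as
 * long as the combined length 8 + 8 stays within max_len.
 */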

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards the right. If you want to merge towards
 * the left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
				   "inode#%lu, eh->eh_entries = 0!",
				   inode->i_ino);
	}

	return merge_done;
}
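
/*
 * Note: a left merge is expressed through this same helper;
 * ext4_ext_convert_to_initialized() below, for instance, passes
 * "ex2 - 1" so the merge still proceeds rightwards.
 */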

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
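
/*
 * Worked example (hypothetical numbers): if newext spans logical blocks
 * 96..103 (b1 = 96, len1 = 8) and the nearest existing extent starts at
 * b2 = 100, then b1 + len1 > b2, so ee_len is trimmed to 100 - 96 = 4
 * blocks and 1 is returned.
 */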

/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into an existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}
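
/*
 * Usage note: callers pass a path obtained from ext4_ext_find_extent().
 * For DIO writes (flag == EXT4_GET_BLOCKS_DIO_CREATE_EXT) both merge
 * attempts above are skipped, so the unwritten extent stays separate
 * until I/O completion converts it.
 */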

int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
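
/*
 * Callback contract, for reference: func() is invoked once per covered
 * range -- EXT4_EXT_CACHE_EXTENT for mapped ranges, EXT4_EXT_CACHE_GAP
 * for holes -- and steers the walk through its return value (a negative
 * error, EXT_BREAK, EXT_REPEAT, or EXT_CONTINUE).
 */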

static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
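
/*
 * Example with made-up numbers: if the leaf maps 100..109 and 200..209
 * and block == 150, the "after" branch computes lblock = 110 and
 * next = 200, so the cached gap is 110..199 (len = 90).
 */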

static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* does the cache hold valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		goto errout;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = cex->ec_type;
	}
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, 0, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When passing the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 * There is some space in the leaf, so there is no
			 * need to account for the leaf block credit.
			 *
			 * bitmaps, block group descriptor blocks
			 * and other metadata blocks still need to be
			 * accounted for.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then
 * in the worst case each tree level's index/leaf needs to be changed;
 * if the tree splits due to inserting a new extent, then the old tree's
 * index/leaf blocks need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
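
/*
 * Worked example (hypothetical tree of depth 2): a contiguous chunk
 * accounts 2 * 2 = 4 index/leaf blocks, while a discontiguous request
 * accounts 3 * 2 = 6 to allow for an extra split at each level.
 */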

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, 0, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
			"%u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}
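
/*
 * Tail-removal arithmetic, illustrated with made-up numbers: for an
 * extent [ee_block = 200, ee_len = 10, pblk = 9000] and from = 205,
 * to = 209, num = 200 + 10 - 205 = 5 and start = 9000 + 10 - 5 = 9005,
 * so physical blocks 9005..9009 are freed.
 */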

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %u\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, so go to the next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must already be prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

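/* completion callback: wakes up the waiter in ext4_ext_zeroout() below */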
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	int ret = -EIO;
	struct bio *bio;
	int blkbits, blocksize;
	sector_t ee_pblock;
	struct completion event;
	unsigned int ee_len, len, done, offset;

	blkbits   = inode->i_blkbits;
	blocksize = inode->i_sb->s_blocksize;
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext_pblock(ex);

	/* convert ee_pblock to 512 byte sectors */
	ee_pblock = ee_pblock << (blkbits - 9);

	while (ee_len > 0) {

		if (ee_len > BIO_MAX_PAGES)
			len = BIO_MAX_PAGES;
		else
			len = ee_len;

		bio = bio_alloc(GFP_NOIO, len);
		if (!bio)
			return -ENOMEM;
		bio->bi_sector = ee_pblock;
		bio->bi_bdev   = inode->i_sb->s_bdev;

		done = 0;
		offset = 0;
		while (done < len) {
			ret = bio_add_page(bio, ZERO_PAGE(0),
							blocksize, offset);
			if (ret != blocksize) {
				/*
				 * We can't add any more pages because of
				 * hardware limitations.  Start a new bio.
				 */
				break;
			}
			done++;
			offset += blocksize;
			if (offset >= PAGE_CACHE_SIZE)
				offset = 0;
		}

		init_completion(&event);
		bio->bi_private = &event;
		bio->bi_end_io = bi_complete;
		submit_bio(WRITE, bio);
		wait_for_completion(&event);

		if (test_bit(BIO_UPTODATE, &bio->bi_flags))
			ret = 0;
		else {
			ret = -EIO;
			bio_put(bio);
			break;
		}
		bio_put(bio);
		ee_len    -= done;
		ee_pblock += done << (blkbits - 9);
	}
	return ret;
}

#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_get_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
						struct inode *inode,
						struct ext4_ext_path *path,
						ext4_lblk_t iblock,
						unsigned int max_blocks)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* If the extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zeroed the full extent */
		return allocated;
	}

	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialized */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		/* If the extent has less than EXT4_EXT_ZERO_LEN blocks, zero out directly */
		if (allocated <= EXT4_EXT_ZERO_LEN) {
			/*
			 * iblock == ee_block is handled by the zeroout
			 * at the beginning.
			 * Mark first half uninitialized.
			 * Mark second half initialized and zero out the
			 * initialized extent
			 */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = cpu_to_le16(ee_len - allocated);
			ext4_ext_mark_uninitialized(ex);
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);

			ex3 = &newex;
			ex3->ee_block = cpu_to_le32(iblock);
			ext4_ext_store_pblock(ex3, newblock);
			ex3->ee_len = cpu_to_le16(allocated);
			err = ext4_ext_insert_extent(handle, inode, path,
							ex3, 0);
			if (err == -ENOSPC) {
				err = ext4_ext_zeroout(inode, &orig_ex);
				if (err)
					goto fix_extent_len;
				ex->ee_block = orig_ex.ee_block;
				ex->ee_len   = orig_ex.ee_len;
				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
				ext4_ext_dirty(handle, inode, path + depth);
				/* blocks available from iblock */
				return allocated;

			} else if (err)
				goto fix_extent_len;

			/*
			 * We need to zero out the second half because
			 * a fallocate request can update file size and
			 * converting the second half to initialized extent
			 * implies that we can leak some junk data to user
			 * space.
			 */
			err = ext4_ext_zeroout(inode, ex3);
			if (err) {
				/*
				 * We should actually mark the
				 * second half as uninit and return error
				 * Insert would have changed the extent
				 */
				depth = ext_depth(inode);
				ext4_ext_drop_refs(path);
				path = ext4_ext_find_extent(inode,
								iblock, path);
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					return err;
				}
				/* get the second half extent details */
				ex = path[depth].p_ext;
				err = ext4_ext_get_access(handle, inode,
								path + depth);
				if (err)
					return err;
				ext4_ext_mark_uninitialized(ex);
				ext4_ext_dirty(handle, inode, path + depth);
				return err;
			}

			/* zeroed the second half */
			return allocated;
		}
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
		if (err == -ENOSPC) {
			err = ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from iblock */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex, might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		orig_ex.ee_len = cpu_to_le16(ee_len -
						ext4_ext_get_actual_len(ex3));
		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, iblock, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = max_blocks;

		/* If the extent has less than EXT4_EXT_ZERO_LEN blocks and
		 * we are trying to insert an extent in the middle, zero it
		 * out directly; otherwise give the extent a chance to merge
		 * to the left
		 */
		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
			iblock != ee_block) {
			err = ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed out the first half */
			/* blocks available from iblock */
			return allocated;
		}
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/* ex2: iblock to iblock + maxblocks-1 : initialized */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to merge towards the right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
	if (err == -ENOSPC) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zeroed out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}

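/*
 * Split layout produced by ext4_ext_convert_to_initialized() above,
 * schematically:
 *
 *   |<----------- original uninitialized extent ------------>|
 *   | ex1 (uninit) | ex2 (written range, init) | ex3 (uninit) |
 *
 * ex1 and/or ex3 are omitted in the one- and two-extent cases.
 */
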
/*
 * This function is called by ext4_ext_get_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the
 * uninitialized extent into multiple initialized/uninitialized extents
 * (up to three). There are three possibilities:
 *   a> No split required: the entire extent should be uninitialized
 *   b> Split into two extents: the write happens at either end of the extent
 *   c> Split into three extents: somebody is writing in the middle of the
 *      extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC at IO completion
 * time, we need to split the uninitialized extent before the IO is
 * submitted. The uninitialized extent will be split into (at most) three
 * uninitialized extents. After the IO completes, the part that was filled
 * will be converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
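/*
 * Illustrative sketch (added commentary, not from the original source):
 * with an uninitialized extent [ee_block, ee_block + ee_len) and a write
 * covering [iblock, iblock + max_blocks), case (c) above splits it as
 *
 *   ee_block                                          ee_block + ee_len
 *   |<- ex1: uninit ->|<- ex2: to be written ->|<-   ex3: uninit   ->|
 *                     iblock    iblock + max_blocks
 *
 * e.g. ee_block = 100, ee_len = 50, iblock = 110, max_blocks = 10 yields
 * ex1 = [100,110), ex2 = [110,120) and ex3 = [120,150), all still marked
 * uninitialized until the IO completes.
 */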
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_ext_path *path,
					ext4_lblk_t iblock,
					unsigned int max_blocks,
					int flags)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;

	ext_debug("ext4_split_unwritten_extents: inode %lu,"
		  "iblock %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)iblock, max_blocks);
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

	/*
	 * If the uninitialized extent begins at the same logical
	 * block where the write begins, and the write completely
	 * covers the extent, then we don't need to split it.
	 */
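	/*
	 * Worked example (added commentary, not from the original source):
	 * ee_block = 100, ee_len = 50, iblock = 100, max_blocks = 60 gives
	 * allocated = 50 <= 60, so the write covers the whole extent and
	 * we can return without splitting.
	 */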
	if ((iblock == ee_block) && (allocated <= max_blocks))
		return allocated;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
		if (err == -ENOSPC) {
			err = ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from iblock */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex, might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		orig_ex.ee_len = cpu_to_le16(ee_len -
						ext4_ext_get_actual_len(ex3));
		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, iblock, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = max_blocks;
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * ex2: iblock to iblock + maxblocks-1 : to be written via direct IO,
	 * still uninitialised.
	 */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	ext4_ext_mark_uninitialized(ex2);
	if (ex2 != ex)
		goto insert;
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	ext_debug("out here\n");
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}

static int ext4_convert_unwritten_extents_dio(handle_t *handle,
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	struct ext4_extent_header *eh;
	int depth;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/*
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex--;
		}
	}
	/*
	 * Try to merge towards the right.
	 */
	ret = ext4_ext_try_to_merge(inode, path, ex);
	if (ret) {
		err = ext4_ext_correct_indexes(handle, inode, path);
		if (err)
			goto out;
		depth = ext_depth(inode);
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock, unsigned int max_blocks,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, struct buffer_head *bh_result,
			ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %d, allocated %u",
		  inode->i_ino, (unsigned long long)iblock, max_blocks,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/* DIO get_block() before submitting the IO: split the extent */
	if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
		ret = ext4_split_unwritten_extents(handle,
						inode, path, iblock,
						max_blocks, flags);
		/*
		 * Flag the inode (non-aio case) or the end_io struct (aio
		 * case) so that this IO is converted to written when it
		 * completes.
		 */
		if (io)
			io->flag = DIO_AIO_UNWRITTEN;
		else
			EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
		goto out;
	}
	/* async DIO end_io completed: convert the filled extent to written */
	if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
		ret = ext4_convert_unwritten_extents_dio(handle, inode,
							path);
		if (ret >= 0)
			ext4_update_inode_fsync_trans(handle, inode, 1);
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request;
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already. We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us. But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		set_buffer_unwritten(bh_result);
		goto out1;
	}

	/* buffered write, writepage time: convert */
	ret = ext4_ext_convert_to_initialized(handle, inode,
						path, iblock,
						max_blocks);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	set_buffer_new(bh_result);
map_out:
	set_buffer_mapped(bh_result);
out1:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
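
/*
 * Recap of the dispatch above (added commentary, not from the original
 * source):
 *
 *   EXT4_GET_BLOCKS_DIO_CREATE_EXT   split the extent before the DIO is
 *                                    submitted; flag for later conversion
 *   EXT4_GET_BLOCKS_DIO_CONVERT_EXT  end_io time: convert to written
 *   EXT4_GET_BLOCKS_UNINIT_EXT       repeated fallocate: map as-is
 *   !EXT4_GET_BLOCKS_CREATE          lookup only: report buffer unwritten
 *   otherwise                        writepage time: convert to initialized
 */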

/*
 * Block allocation/map/preallocation routine for extent-based files
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock,
			unsigned int max_blocks, struct buffer_head *bh_result,
			int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t newblock;
	int err = 0, depth, ret, cache_type;
	unsigned int allocated = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %u/%u requested for inode %lu\n",
			iblock, max_blocks, inode->i_ino);

	/* check in cache */
	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
	if (cache_type) {
		if (cache_type == EXT4_EXT_CACHE_GAP) {
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				   - le32_to_cpu(newex.ee_block)
				   + ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = ext4_ext_get_actual_len(&newex) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	BUG_ON(path[depth].p_ext == NULL && depth != 0);
	eh = path[depth].p_hdr;

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
					ee_block, ee_len, newblock);

			/* Do not put uninitialized extent in the cache */
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
							ee_len, ee_start,
							EXT4_EXT_CACHE_EXTENT);
				goto out;
			}
			ret = ext4_ext_handle_uninitialized_extents(handle,
					inode, iblock, max_blocks, path,
					flags, allocated, bh_result, newblock);
			return ret;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we couldn't try to create block if create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.
	 */

	/* find neighbouring allocated blocks */
	ar.lleft = iblock;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = iblock;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (max_blocks > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		max_blocks = EXT_INIT_MAX_LEN;
	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		max_blocks = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
	newex.ee_block = cpu_to_le32(iblock);
	newex.ee_len = cpu_to_le16(max_blocks);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = max_blocks;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, iblock);
	ar.logical = iblock;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * An io_end structure is created for every async direct IO
		 * write to the middle of the file. To avoid an unnecessary
		 * conversion for every aio dio rewrite to the middle of the
		 * file, we flag only the IO that really needs the conversion
		 * here. For the non-async direct IO case, flag the inode
		 * state so that we perform the conversion when the IO is
		 * done.
		 */
		if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
			if (io)
				io->flag = DIO_AIO_UNWRITTEN;
			else
				EXT4_I(inode)->i_state |=
					EXT4_STATE_DIO_UNWRITTEN;
		}
	}
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err) {
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
					ext4_ext_get_actual_len(&newex), 0);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	set_buffer_new(bh_result);

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
						EXT4_EXT_CACHE_EXTENT);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	set_buffer_mapped(bh_result);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
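
/*
 * Illustrative caller sketch (added commentary, not from the original
 * source): a hypothetical helper showing how the return contract of
 * ext4_ext_get_blocks() is typically consumed: a positive return means
 * that many blocks were mapped into 'bh', zero means a hole was found
 * (and left unallocated), and a negative value is an error.
 */
#if 0	/* sketch only, not built */
static int ext4_ext_map_one_block(handle_t *handle, struct inode *inode,
				  ext4_lblk_t lblk, struct buffer_head *bh)
{
	int ret = ext4_ext_get_blocks(handle, inode, lblk, 1, bh,
				      EXT4_GET_BLOCKS_CREATE);
	if (ret > 0)		/* 'ret' blocks now mapped in 'bh' */
		return 0;
	if (ret == 0)		/* hole: 'bh' left unmapped */
		return -EIO;	/* policy chosen for this sketch only */
	return ret;		/* pass the error through */
}
#endif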

void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably the first extent we're going to free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
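	/*
	 * Worked example (added commentary, not from the original source):
	 * with a 4096-byte block size, i_size = 10000 rounds up to
	 * last_block = (10000 + 4095) >> 12 = 3, so blocks 3 and beyond
	 * are removed below while blocks 0..2 still hold data.
	 */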
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	up_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	}
}

/*
 * Preallocate space for a file. This implements ext4's fallocate inode
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support the fallocate() system
 * call).
 */
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return -EOPNOTSUPP;

	/* preallocation to directories is currently not supported */
	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	block = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial block
	 * at either end still needs a whole block; consider
	 * blocksize = 4096, offset = 3072 and len = 2048.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- block;
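	/*
	 * Worked example (added commentary, not from the original source):
	 * with blocksize = 4096, offset = 3072 and len = 2048, the range
	 * [3072, 5120) touches blocks 0 and 1, so block = 3072 >> 12 = 0
	 * and max_blocks = (ALIGN(5120, 4096) >> 12) - 0 = 2, whereas the
	 * naive len >> blkbits would give 0.
	 */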
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
retry:
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block,
				      max_blocks, &map_bh,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, block, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = (block + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
						buffer_new(&map_bh));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	return ret > 0 ? ret2 : ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io callback
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				   loff_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	block = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial block
	 * at either end still needs a whole block; consider
	 * blocksize = 4096, offset = 3072 and len = 2048.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- block;
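	/* same rounding as in ext4_fallocate() above; see the worked
	 * example there (added cross-reference, not from the original
	 * source) */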
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block,
				      max_blocks, &map_bh,
				      EXT4_GET_BLOCKS_DIO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, block, max_blocks);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}
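
/*
 * Illustrative caller sketch (added commentary, not from the original
 * source): the async DIO completion path is expected to convert the byte
 * range it just finished writing, roughly like this.
 */
#if 0	/* sketch only, not built */
static void ext4_end_io_convert_sketch(struct inode *inode,
				       loff_t offset, ssize_t size)
{
	int err = ext4_convert_unwritten_extents(inode, offset, size);

	if (err)
		/* a real caller must handle the failure, e.g. retry later */
		printk(KERN_ERR "ext4: unwritten extent conversion failed\n");
}
#endif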

/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	__u64 logical;
	__u64 physical;
	__u64 length;
	__u32 flags = 0;
	int error;

	logical = (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
		pgoff_t offset;
		struct page *page;
		struct buffer_head *bh = NULL;

		offset = logical >> PAGE_SHIFT;
		page = find_get_page(inode->i_mapping, offset);
		if (!page || !page_has_buffers(page))
			return EXT_CONTINUE;

		bh = page_buffers(page);

		if (!bh)
			return EXT_CONTINUE;

		if (buffer_delay(bh)) {
			flags |= FIEMAP_EXTENT_DELALLOC;
			page_cache_release(page);
		} else {
			page_cache_release(page);
			return EXT_CONTINUE;
		}
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length   = (__u64)newex->ec_len   << blksize_bits;
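	/*
	 * Worked example (added commentary, not from the original source):
	 * with blksize_bits = 12, a cached extent with ec_block = 2,
	 * ec_len = 3 and ec_start = 1000 is reported as logical = 8192,
	 * length = 12288 and physical = 4096000 bytes.
	 */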

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	/*
	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
	 *
	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
	 * this also indicates no more allocated blocks.
	 *
	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
	 */
	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
		loff_t size = i_size_read(inode);
		loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);

		flags |= FIEMAP_EXTENT_LAST;
		if ((flags & FIEMAP_EXTENT_DELALLOC) &&
		    logical+length > size)
			length = (size - logical + bs - 1) & ~(bs-1);
	}

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (error < 0)
		return error;
	if (error == 1)
		return EXT_BREAK;

	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
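
/*
 * Worked example for the in-inode case above (added commentary, not from
 * the original source): with a 256-byte on-disk inode and i_extra_isize =
 * 32, the xattr area starts at offset = 128 + 32 = 160 within the inode,
 * so length = 256 - 160 = 96 bytes, reported at the inode block's byte
 * address plus 160.
 */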

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	ext4_lblk_t len_blks;
	int error = 0;

	/* fall back to the generic path here if not in extents format */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		start_blk = start >> inode->i_sb->s_blocksize_bits;
		len_blks = len >> inode->i_sb->s_blocksize_bits;

		/*
		 * Walk the extent tree, gathering extent information;
		 * ext4_ext_fiemap_cb will push extents back to the user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}