1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * alloc.c
5 *
6 * Extent allocs and frees
7 *
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
24 */
25
26#include <linux/fs.h>
27#include <linux/types.h>
28#include <linux/slab.h>
29#include <linux/highmem.h>
30#include <linux/swap.h>
31
32#define MLOG_MASK_PREFIX ML_DISK_ALLOC
33#include <cluster/masklog.h>
34
35#include "ocfs2.h"
36
37#include "alloc.h"
38#include "aops.h"
39#include "dlmglue.h"
40#include "extent_map.h"
41#include "inode.h"
42#include "journal.h"
43#include "localalloc.h"
44#include "suballoc.h"
45#include "sysfile.h"
46#include "file.h"
47#include "super.h"
48#include "uptodate.h"
49
50#include "buffer_head_io.h"
51
52/*
53 * ocfs2_extent_tree and ocfs2_extent_tree_operations are used to abstract
54 * the b-tree operations in ocfs2. The b-tree operations are no longer
55 * limited to ocfs2_dinode: any structure which needs to allocate clusters
56 * can store them in a b-tree by providing its own ocfs2_extent_tree and
57 * operations.
58 *
59 * ocfs2_extent_tree contains info for the root of the b-tree; it must have
60 * a root ocfs2_extent_list and a root_bh so that they can be used in the
61 * b-tree functions.
62 * ocfs2_extent_tree_operations abstracts the normal operations we do on
63 * the root of an extent b-tree.
64 */
65struct ocfs2_extent_tree;
66
67struct ocfs2_extent_tree_operations {
68 void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et,
69 u64 blkno);
70 u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et);
71 void (*eo_update_clusters)(struct inode *inode,
72 struct ocfs2_extent_tree *et,
73 u32 new_clusters);
74 int (*eo_sanity_check)(struct inode *inode, struct ocfs2_extent_tree *et);
75
76 /* These are internal to ocfs2_extent_tree and don't have
77 * accessor functions */
78 void (*eo_fill_root_el)(struct ocfs2_extent_tree *et);
79};
80
81struct ocfs2_extent_tree {
82 enum ocfs2_extent_tree_type et_type;
83 struct ocfs2_extent_tree_operations *et_ops;
84 struct buffer_head *et_root_bh;
85 struct ocfs2_extent_list *et_root_el;
86 void *et_object;
87 unsigned int et_max_leaf_clusters;
88};
89
90static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et)
91{
92 struct ocfs2_dinode *di = et->et_object;
93
94 et->et_root_el = &di->id2.i_list;
95}
96
97static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
98 u64 blkno)
99{
100 struct ocfs2_dinode *di = et->et_object;
101
102 BUG_ON(et->et_type != OCFS2_DINODE_EXTENT);
103 di->i_last_eb_blk = cpu_to_le64(blkno);
104}
105
106static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
107{
108 struct ocfs2_dinode *di = et->et_object;
109
110 BUG_ON(et->et_type != OCFS2_DINODE_EXTENT);
111 return le64_to_cpu(di->i_last_eb_blk);
112}
113
114static void ocfs2_dinode_update_clusters(struct inode *inode,
115 struct ocfs2_extent_tree *et,
116 u32 clusters)
117{
118 struct ocfs2_dinode *di = et->et_object;
119
120 le32_add_cpu(&di->i_clusters, clusters);
121 spin_lock(&OCFS2_I(inode)->ip_lock);
122 OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters);
123 spin_unlock(&OCFS2_I(inode)->ip_lock);
124}
125
126static int ocfs2_dinode_sanity_check(struct inode *inode,
127 struct ocfs2_extent_tree *et)
128{
129 int ret = 0;
130 struct ocfs2_dinode *di;
131
132 BUG_ON(et->et_type != OCFS2_DINODE_EXTENT);
133
134 di = et->et_object;
135 if (!OCFS2_IS_VALID_DINODE(di)) {
136 ret = -EIO;
137 ocfs2_error(inode->i_sb,
138 "Inode %llu has invalid path root",
139 (unsigned long long)OCFS2_I(inode)->ip_blkno);
140 }
141
142 return ret;
143}
144
145static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
146 .eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk,
147 .eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk,
148 .eo_update_clusters = ocfs2_dinode_update_clusters,
149 .eo_sanity_check = ocfs2_dinode_sanity_check,
150 .eo_fill_root_el = ocfs2_dinode_fill_root_el,
151};
152
153static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et)
154{
155 struct ocfs2_xattr_value_root *xv = et->et_object;
156
157 et->et_root_el = &xv->xr_list;
158}
159
160static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et,
161 u64 blkno)
162{
163 struct ocfs2_xattr_value_root *xv =
164 (struct ocfs2_xattr_value_root *)et->et_object;
165
166 xv->xr_last_eb_blk = cpu_to_le64(blkno);
167}
168
169static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
170{
171 struct ocfs2_xattr_value_root *xv =
172 (struct ocfs2_xattr_value_root *) et->et_object;
173
174 return le64_to_cpu(xv->xr_last_eb_blk);
175}
176
177static void ocfs2_xattr_value_update_clusters(struct inode *inode,
178 struct ocfs2_extent_tree *et,
179 u32 clusters)
180{
181 struct ocfs2_xattr_value_root *xv =
182 (struct ocfs2_xattr_value_root *)et->et_object;
183
184 le32_add_cpu(&xv->xr_clusters, clusters);
185}
186
187static int ocfs2_xattr_value_sanity_check(struct inode *inode,
188 struct ocfs2_extent_tree *et)
189{
190 return 0;
191}
192
193static struct ocfs2_extent_tree_operations ocfs2_xattr_et_ops = {
194 .eo_set_last_eb_blk = ocfs2_xattr_value_set_last_eb_blk,
195 .eo_get_last_eb_blk = ocfs2_xattr_value_get_last_eb_blk,
196 .eo_update_clusters = ocfs2_xattr_value_update_clusters,
197 .eo_sanity_check = ocfs2_xattr_value_sanity_check,
198 .eo_fill_root_el = ocfs2_xattr_value_fill_root_el,
199};
200
201static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et)
202{
203 struct ocfs2_xattr_block *xb = et->et_object;
204
205 et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
206}
207
208static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
209 u64 blkno)
210{
211 struct ocfs2_xattr_block *xb = et->et_object;
212 struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
213
214 xt->xt_last_eb_blk = cpu_to_le64(blkno);
215}
216
217static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
218{
219 struct ocfs2_xattr_block *xb = et->et_object;
220 struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
221
222 return le64_to_cpu(xt->xt_last_eb_blk);
223}
224
225static void ocfs2_xattr_tree_update_clusters(struct inode *inode,
226 struct ocfs2_extent_tree *et,
227 u32 clusters)
228{
229 struct ocfs2_xattr_block *xb = et->et_object;
230
231 le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters);
232}
233
234static int ocfs2_xattr_tree_sanity_check(struct inode *inode,
235 struct ocfs2_extent_tree *et)
236{
237 return 0;
238}
239
240static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = {
241 .eo_set_last_eb_blk = ocfs2_xattr_tree_set_last_eb_blk,
242 .eo_get_last_eb_blk = ocfs2_xattr_tree_get_last_eb_blk,
243 .eo_update_clusters = ocfs2_xattr_tree_update_clusters,
244 .eo_sanity_check = ocfs2_xattr_tree_sanity_check,
245 .eo_fill_root_el = ocfs2_xattr_tree_fill_root_el,
246};
247
248static void ocfs2_get_extent_tree(struct ocfs2_extent_tree *et,
249 struct inode *inode,
250 struct buffer_head *bh,
251 enum ocfs2_extent_tree_type et_type,
252 void *obj)
253{
254 et->et_type = et_type;
255 get_bh(bh);
256 et->et_root_bh = bh;
257 et->et_max_leaf_clusters = 0;
258 if (!obj)
259 obj = (void *)bh->b_data;
260 et->et_object = obj;
261
262 if (et_type == OCFS2_DINODE_EXTENT) {
263 et->et_ops = &ocfs2_dinode_et_ops;
264 } else if (et_type == OCFS2_XATTR_VALUE_EXTENT) {
265 et->et_ops = &ocfs2_xattr_et_ops;
266 } else if (et_type == OCFS2_XATTR_TREE_EXTENT) {
267 et->et_ops = &ocfs2_xattr_tree_et_ops;
268 et->et_max_leaf_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
269 OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
270 }
271
272 et->et_ops->eo_fill_root_el(et);
273}
274
275static void ocfs2_put_extent_tree(struct ocfs2_extent_tree *et)
276{
277 brelse(et->et_root_bh);
278}
279
280static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
281 u64 new_last_eb_blk)
282{
283 et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
284}
285
286static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
287{
288 return et->et_ops->eo_get_last_eb_blk(et);
289}
290
291static inline void ocfs2_et_update_clusters(struct inode *inode,
292 struct ocfs2_extent_tree *et,
293 u32 clusters)
294{
295 et->et_ops->eo_update_clusters(inode, et, clusters);
296}
297
298static inline int ocfs2_et_sanity_check(struct inode *inode,
299 struct ocfs2_extent_tree *et)
300{
301 return et->et_ops->eo_sanity_check(inode, et);
302}
303
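/*
 * Illustrative sketch (an editorial addition, not original code): how a
 * caller is expected to use the extent tree abstraction above.  It assumes
 * a hypothetical caller that already holds the inode's dinode buffer_head
 * 'di_bh'; only functions defined in this file are used.
 *
 *	struct ocfs2_extent_tree et;
 *
 *	ocfs2_get_extent_tree(&et, inode, di_bh, OCFS2_DINODE_EXTENT, NULL);
 *	if (!ocfs2_et_sanity_check(inode, &et)) {
 *		u64 last_eb = ocfs2_et_get_last_eb_blk(&et);
 *		ocfs2_et_update_clusters(inode, &et, clusters_added);
 *	}
 *	ocfs2_put_extent_tree(&et);
 *
 * ocfs2_get_extent_tree() takes a reference on the root buffer_head, so
 * ocfs2_put_extent_tree() must always be called to drop it.  The sanity
 * check returns 0 when the root structure looks valid.
 */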
304static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
305static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
306 struct ocfs2_extent_block *eb);
307
308/*
309 * Structures which describe a path through a btree, and functions to
310 * manipulate them.
311 *
312 * The idea here is to be as generic as possible with the tree
313 * manipulation code.
314 */
315struct ocfs2_path_item {
316 struct buffer_head *bh;
317 struct ocfs2_extent_list *el;
318};
319
320#define OCFS2_MAX_PATH_DEPTH 5
321
322struct ocfs2_path {
323 int p_tree_depth;
324 struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH];
325};
326
327#define path_root_bh(_path) ((_path)->p_node[0].bh)
328#define path_root_el(_path) ((_path)->p_node[0].el)
329#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
330#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
331#define path_num_items(_path) ((_path)->p_tree_depth + 1)
332
333/*
334 * Reset the actual path elements so that we can re-use the structure
335 * to build another path. Generally, this involves freeing the buffer
336 * heads.
337 */
338static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
339{
340 int i, start = 0, depth = 0;
341 struct ocfs2_path_item *node;
342
343 if (keep_root)
344 start = 1;
345
346 for(i = start; i < path_num_items(path); i++) {
347 node = &path->p_node[i];
348
349 brelse(node->bh);
350 node->bh = NULL;
351 node->el = NULL;
352 }
353
354 /*
355 * Tree depth may change during truncate, or insert. If we're
356 * keeping the root extent list, then make sure that our path
357 * structure reflects the proper depth.
358 */
359 if (keep_root)
360 depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
361
362 path->p_tree_depth = depth;
363}
364
365static void ocfs2_free_path(struct ocfs2_path *path)
366{
367 if (path) {
368 ocfs2_reinit_path(path, 0);
369 kfree(path);
370 }
371}
372
373/*
374 * Copy all the elements of src into dest. After this call, src could be freed
375 * without affecting dest.
376 *
377 * Both paths should have the same root. Any non-root elements of dest
378 * will be freed.
379 */
380static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
381{
382 int i;
383
384 BUG_ON(path_root_bh(dest) != path_root_bh(src));
385 BUG_ON(path_root_el(dest) != path_root_el(src));
386
387 ocfs2_reinit_path(dest, 1);
388
389 for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
390 dest->p_node[i].bh = src->p_node[i].bh;
391 dest->p_node[i].el = src->p_node[i].el;
392
393 if (dest->p_node[i].bh)
394 get_bh(dest->p_node[i].bh);
395 }
396}
397
398/*
Mark Fashehdcd05382007-01-16 11:32:23 -0800399 * Make the *dest path the same as src and re-initialize src path to
400 * have a root only.
401 */
402static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
403{
404 int i;
405
406 BUG_ON(path_root_bh(dest) != path_root_bh(src));
407
408 for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
409 brelse(dest->p_node[i].bh);
410
411 dest->p_node[i].bh = src->p_node[i].bh;
412 dest->p_node[i].el = src->p_node[i].el;
413
414 src->p_node[i].bh = NULL;
415 src->p_node[i].el = NULL;
416 }
417}
418
419/*
420 * Insert an extent block at given index.
421 *
422 * This will not take an additional reference on eb_bh.
423 */
424static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
425 struct buffer_head *eb_bh)
426{
427 struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;
428
429 /*
430 * Right now, no root bh is an extent block, so this helps
431 * catch code errors with dinode trees. The assertion can be
432 * safely removed if we ever need to insert extent block
433 * structures at the root.
434 */
435 BUG_ON(index == 0);
436
437 path->p_node[index].bh = eb_bh;
438 path->p_node[index].el = &eb->h_list;
439}
440
441static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
442 struct ocfs2_extent_list *root_el)
443{
444 struct ocfs2_path *path;
445
446 BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);
447
448 path = kzalloc(sizeof(*path), GFP_NOFS);
449 if (path) {
450 path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
451 get_bh(root_bh);
452 path_root_bh(path) = root_bh;
453 path_root_el(path) = root_el;
454 }
455
456 return path;
457}
458
459/*
Mark Fashehdcd05382007-01-16 11:32:23 -0800460 * Convenience function to journal all components in a path.
461 */
462static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
463 struct ocfs2_path *path)
464{
465 int i, ret = 0;
466
467 if (!path)
468 goto out;
469
470 for(i = 0; i < path_num_items(path); i++) {
471 ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh,
472 OCFS2_JOURNAL_ACCESS_WRITE);
473 if (ret < 0) {
474 mlog_errno(ret);
475 goto out;
476 }
477 }
478
479out:
480 return ret;
481}
482
Mark Fasheh328d5752007-06-18 10:48:04 -0700483/*
484 * Return the index of the extent record which contains cluster #v_cluster.
485 * -1 is returned if it was not found.
486 *
487 * Should work fine on interior and exterior nodes.
488 */
489int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
490{
491 int ret = -1;
492 int i;
493 struct ocfs2_extent_rec *rec;
494 u32 rec_end, rec_start, clusters;
495
496 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
497 rec = &el->l_recs[i];
498
499 rec_start = le32_to_cpu(rec->e_cpos);
500 clusters = ocfs2_rec_clusters(el, rec);
501
502 rec_end = rec_start + clusters;
503
504 if (v_cluster >= rec_start && v_cluster < rec_end) {
505 ret = i;
506 break;
507 }
508 }
509
510 return ret;
511}
512
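/*
 * Illustrative sketch (an editorial addition): typical use of
 * ocfs2_search_extent_list().  'el' and 'v_cluster' are assumed to be
 * supplied by the caller; only helpers visible in this file are used.
 *
 *	int index = ocfs2_search_extent_list(el, v_cluster);
 *	if (index == -1) {
 *		// v_cluster lies in a hole - no record covers it.
 *	} else {
 *		struct ocfs2_extent_rec *rec = &el->l_recs[index];
 *		u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);
 *		// coff is the offset of v_cluster within that extent.
 *	}
 */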
Mark Fashehdcd05382007-01-16 11:32:23 -0800513enum ocfs2_contig_type {
514 CONTIG_NONE = 0,
515 CONTIG_LEFT,
Mark Fasheh328d5752007-06-18 10:48:04 -0700516 CONTIG_RIGHT,
517 CONTIG_LEFTRIGHT,
Mark Fashehdcd05382007-01-16 11:32:23 -0800518};
519
Mark Fashehe48edee2007-03-07 16:46:57 -0800520
521/*
522 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
523 * ocfs2_extent_contig only work properly against leaf nodes!
524 */
Mark Fashehdcd05382007-01-16 11:32:23 -0800525static int ocfs2_block_extent_contig(struct super_block *sb,
526 struct ocfs2_extent_rec *ext,
527 u64 blkno)
Mark Fashehccd979b2005-12-15 14:31:24 -0800528{
Mark Fashehe48edee2007-03-07 16:46:57 -0800529 u64 blk_end = le64_to_cpu(ext->e_blkno);
530
531 blk_end += ocfs2_clusters_to_blocks(sb,
532 le16_to_cpu(ext->e_leaf_clusters));
533
534 return blkno == blk_end;
Mark Fashehccd979b2005-12-15 14:31:24 -0800535}
536
Mark Fashehdcd05382007-01-16 11:32:23 -0800537static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
538 struct ocfs2_extent_rec *right)
539{
Mark Fashehe48edee2007-03-07 16:46:57 -0800540 u32 left_range;
541
542 left_range = le32_to_cpu(left->e_cpos) +
543 le16_to_cpu(left->e_leaf_clusters);
544
545 return (left_range == le32_to_cpu(right->e_cpos));
Mark Fashehdcd05382007-01-16 11:32:23 -0800546}
547
548static enum ocfs2_contig_type
549 ocfs2_extent_contig(struct inode *inode,
550 struct ocfs2_extent_rec *ext,
551 struct ocfs2_extent_rec *insert_rec)
552{
553 u64 blkno = le64_to_cpu(insert_rec->e_blkno);
554
Mark Fasheh328d5752007-06-18 10:48:04 -0700555 /*
556 * Refuse to coalesce extent records with different flag
557 * fields - we don't want to mix unwritten extents with user
558 * data.
559 */
560 if (ext->e_flags != insert_rec->e_flags)
561 return CONTIG_NONE;
562
Mark Fashehdcd05382007-01-16 11:32:23 -0800563 if (ocfs2_extents_adjacent(ext, insert_rec) &&
564 ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
565 return CONTIG_RIGHT;
566
567 blkno = le64_to_cpu(ext->e_blkno);
568 if (ocfs2_extents_adjacent(insert_rec, ext) &&
569 ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
570 return CONTIG_LEFT;
571
572 return CONTIG_NONE;
573}
574
575/*
576 * NOTE: We can have pretty much any combination of contiguousness and
577 * appending.
578 *
579 * The usefulness of APPEND_TAIL is more in that it lets us know that
580 * we'll have to update the path to that leaf.
581 */
582enum ocfs2_append_type {
583 APPEND_NONE = 0,
584 APPEND_TAIL,
585};
586
Mark Fasheh328d5752007-06-18 10:48:04 -0700587enum ocfs2_split_type {
588 SPLIT_NONE = 0,
589 SPLIT_LEFT,
590 SPLIT_RIGHT,
591};
592
Mark Fashehdcd05382007-01-16 11:32:23 -0800593struct ocfs2_insert_type {
Mark Fasheh328d5752007-06-18 10:48:04 -0700594 enum ocfs2_split_type ins_split;
Mark Fashehdcd05382007-01-16 11:32:23 -0800595 enum ocfs2_append_type ins_appending;
596 enum ocfs2_contig_type ins_contig;
597 int ins_contig_index;
Mark Fashehdcd05382007-01-16 11:32:23 -0800598 int ins_tree_depth;
599};
600
Mark Fasheh328d5752007-06-18 10:48:04 -0700601struct ocfs2_merge_ctxt {
602 enum ocfs2_contig_type c_contig_type;
603 int c_has_empty_extent;
604 int c_split_covers_rec;
Mark Fasheh328d5752007-06-18 10:48:04 -0700605};
606
Mark Fashehccd979b2005-12-15 14:31:24 -0800607/*
608 * How many free extents have we got before we need more meta data?
609 */
610int ocfs2_num_free_extents(struct ocfs2_super *osb,
611 struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +0800612 struct buffer_head *root_bh,
Tao Maf56654c2008-08-18 17:38:48 +0800613 enum ocfs2_extent_tree_type type,
Joel Beckerea5efa12008-08-20 16:57:27 -0700614 void *obj)
Mark Fashehccd979b2005-12-15 14:31:24 -0800615{
616 int retval;
Tao Mae7d4cb62008-08-18 17:38:44 +0800617 struct ocfs2_extent_list *el = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -0800618 struct ocfs2_extent_block *eb;
619 struct buffer_head *eb_bh = NULL;
Tao Mae7d4cb62008-08-18 17:38:44 +0800620 u64 last_eb_blk = 0;
Mark Fashehccd979b2005-12-15 14:31:24 -0800621
622 mlog_entry_void();
623
Tao Mae7d4cb62008-08-18 17:38:44 +0800624 if (type == OCFS2_DINODE_EXTENT) {
625 struct ocfs2_dinode *fe =
626 (struct ocfs2_dinode *)root_bh->b_data;
627 if (!OCFS2_IS_VALID_DINODE(fe)) {
628 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
629 retval = -EIO;
630 goto bail;
631 }
632
633 if (fe->i_last_eb_blk)
634 last_eb_blk = le64_to_cpu(fe->i_last_eb_blk);
635 el = &fe->id2.i_list;
Tao Maf56654c2008-08-18 17:38:48 +0800636 } else if (type == OCFS2_XATTR_VALUE_EXTENT) {
637 struct ocfs2_xattr_value_root *xv =
Joel Beckerea5efa12008-08-20 16:57:27 -0700638 (struct ocfs2_xattr_value_root *) obj;
Tao Maf56654c2008-08-18 17:38:48 +0800639
640 last_eb_blk = le64_to_cpu(xv->xr_last_eb_blk);
641 el = &xv->xr_list;
Tao Maba492612008-08-18 17:38:49 +0800642 } else if (type == OCFS2_XATTR_TREE_EXTENT) {
643 struct ocfs2_xattr_block *xb =
644 (struct ocfs2_xattr_block *)root_bh->b_data;
645
646 last_eb_blk = le64_to_cpu(xb->xb_attrs.xb_root.xt_last_eb_blk);
647 el = &xb->xb_attrs.xb_root.xt_list;
Mark Fashehccd979b2005-12-15 14:31:24 -0800648 }
649
Tao Mae7d4cb62008-08-18 17:38:44 +0800650 if (last_eb_blk) {
651 retval = ocfs2_read_block(osb, last_eb_blk,
Mark Fashehccd979b2005-12-15 14:31:24 -0800652 &eb_bh, OCFS2_BH_CACHED, inode);
653 if (retval < 0) {
654 mlog_errno(retval);
655 goto bail;
656 }
657 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
658 el = &eb->h_list;
Tao Mae7d4cb62008-08-18 17:38:44 +0800659 }
Mark Fashehccd979b2005-12-15 14:31:24 -0800660
661 BUG_ON(el->l_tree_depth != 0);
662
663 retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
664bail:
665 if (eb_bh)
666 brelse(eb_bh);
667
668 mlog_exit(retval);
669 return retval;
670}
671
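/*
 * Illustrative sketch (an editorial addition): a caller typically compares
 * the result of ocfs2_num_free_extents() against the number of records it
 * is about to insert, and reserves extra metadata blocks through the
 * suballocator when the tree might have to grow.  'di_bh' is assumed to be
 * the inode's dinode buffer_head.
 *
 *	int free = ocfs2_num_free_extents(osb, inode, di_bh,
 *					  OCFS2_DINODE_EXTENT, NULL);
 *	if (free < 0)
 *		return free;
 *	if (free < wanted_records)
 *		needs_meta_allocation = 1;
 */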
672/* expects array to already be allocated
673 *
674 * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
675 * l_count for you
676 */
677static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
Mark Fasheh1fabe142006-10-09 18:11:45 -0700678 handle_t *handle,
Mark Fashehccd979b2005-12-15 14:31:24 -0800679 struct inode *inode,
680 int wanted,
681 struct ocfs2_alloc_context *meta_ac,
682 struct buffer_head *bhs[])
683{
684 int count, status, i;
685 u16 suballoc_bit_start;
686 u32 num_got;
687 u64 first_blkno;
688 struct ocfs2_extent_block *eb;
689
690 mlog_entry_void();
691
692 count = 0;
693 while (count < wanted) {
694 status = ocfs2_claim_metadata(osb,
695 handle,
696 meta_ac,
697 wanted - count,
698 &suballoc_bit_start,
699 &num_got,
700 &first_blkno);
701 if (status < 0) {
702 mlog_errno(status);
703 goto bail;
704 }
705
706 for(i = count; i < (num_got + count); i++) {
707 bhs[i] = sb_getblk(osb->sb, first_blkno);
708 if (bhs[i] == NULL) {
709 status = -EIO;
710 mlog_errno(status);
711 goto bail;
712 }
713 ocfs2_set_new_buffer_uptodate(inode, bhs[i]);
714
715 status = ocfs2_journal_access(handle, inode, bhs[i],
716 OCFS2_JOURNAL_ACCESS_CREATE);
717 if (status < 0) {
718 mlog_errno(status);
719 goto bail;
720 }
721
722 memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
723 eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
724 /* Ok, setup the minimal stuff here. */
725 strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
726 eb->h_blkno = cpu_to_le64(first_blkno);
727 eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
Mark Fashehccd979b2005-12-15 14:31:24 -0800728 eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
Mark Fashehccd979b2005-12-15 14:31:24 -0800729 eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
730 eb->h_list.l_count =
731 cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
732
733 suballoc_bit_start++;
734 first_blkno++;
735
736 /* We'll also be dirtied by the caller, so
737 * this isn't absolutely necessary. */
738 status = ocfs2_journal_dirty(handle, bhs[i]);
739 if (status < 0) {
740 mlog_errno(status);
741 goto bail;
742 }
743 }
744
745 count += num_got;
746 }
747
748 status = 0;
749bail:
750 if (status < 0) {
751 for(i = 0; i < wanted; i++) {
752 if (bhs[i])
753 brelse(bhs[i]);
754 bhs[i] = NULL;
755 }
756 }
757 mlog_exit(status);
758 return status;
759}
760
761/*
Mark Fashehdcd05382007-01-16 11:32:23 -0800762 * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
763 *
764 * Returns the sum of the rightmost extent rec logical offset and
765 * cluster count.
766 *
767 * ocfs2_add_branch() uses this to determine what logical cluster
768 * value should be populated into the leftmost new branch records.
769 *
770 * ocfs2_shift_tree_depth() uses this to determine the # clusters
771 * value for the new topmost tree record.
772 */
773static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
774{
775 int i;
776
777 i = le16_to_cpu(el->l_next_free_rec) - 1;
778
779 return le32_to_cpu(el->l_recs[i].e_cpos) +
Mark Fashehe48edee2007-03-07 16:46:57 -0800780 ocfs2_rec_clusters(el, &el->l_recs[i]);
Mark Fashehdcd05382007-01-16 11:32:23 -0800781}
782
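/*
 * Worked example (added for clarity): if the rightmost used record of 'el'
 * is { e_cpos = 100, clusters = 8 }, ocfs2_sum_rightmost_rec() returns 108,
 * the first logical cluster *after* the current tree - which is exactly the
 * cpos a newly added branch or a new topmost record should start at.
 */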
783/*
Mark Fashehccd979b2005-12-15 14:31:24 -0800784 * Add an entire tree branch to our inode. eb_bh is the extent block
785 * to start at, if we don't want to start the branch at the dinode
786 * structure.
787 *
788 * last_eb_bh is required as we have to update its next_leaf pointer
789 * for the new last extent block.
790 *
791 * the new branch will be 'empty' in the sense that every block will
Mark Fashehe48edee2007-03-07 16:46:57 -0800792 * contain a single record with cluster count == 0.
Mark Fashehccd979b2005-12-15 14:31:24 -0800793 */
794static int ocfs2_add_branch(struct ocfs2_super *osb,
Mark Fasheh1fabe142006-10-09 18:11:45 -0700795 handle_t *handle,
Mark Fashehccd979b2005-12-15 14:31:24 -0800796 struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +0800797 struct ocfs2_extent_tree *et,
Mark Fashehccd979b2005-12-15 14:31:24 -0800798 struct buffer_head *eb_bh,
Mark Fasheh328d5752007-06-18 10:48:04 -0700799 struct buffer_head **last_eb_bh,
Mark Fashehccd979b2005-12-15 14:31:24 -0800800 struct ocfs2_alloc_context *meta_ac)
801{
802 int status, new_blocks, i;
803 u64 next_blkno, new_last_eb_blk;
804 struct buffer_head *bh;
805 struct buffer_head **new_eb_bhs = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -0800806 struct ocfs2_extent_block *eb;
807 struct ocfs2_extent_list *eb_el;
808 struct ocfs2_extent_list *el;
Mark Fashehdcd05382007-01-16 11:32:23 -0800809 u32 new_cpos;
Mark Fashehccd979b2005-12-15 14:31:24 -0800810
811 mlog_entry_void();
812
Mark Fasheh328d5752007-06-18 10:48:04 -0700813 BUG_ON(!last_eb_bh || !*last_eb_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -0800814
Mark Fashehccd979b2005-12-15 14:31:24 -0800815 if (eb_bh) {
816 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
817 el = &eb->h_list;
818 } else
Joel Beckerce1d9ea2008-08-20 16:30:07 -0700819 el = et->et_root_el;
Mark Fashehccd979b2005-12-15 14:31:24 -0800820
821 /* we never add a branch to a leaf. */
822 BUG_ON(!el->l_tree_depth);
823
824 new_blocks = le16_to_cpu(el->l_tree_depth);
825
826 /* allocate the number of new eb blocks we need */
827 new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
828 GFP_KERNEL);
829 if (!new_eb_bhs) {
830 status = -ENOMEM;
831 mlog_errno(status);
832 goto bail;
833 }
834
835 status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
836 meta_ac, new_eb_bhs);
837 if (status < 0) {
838 mlog_errno(status);
839 goto bail;
840 }
841
Mark Fasheh328d5752007-06-18 10:48:04 -0700842 eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
Mark Fashehdcd05382007-01-16 11:32:23 -0800843 new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
844
Mark Fashehccd979b2005-12-15 14:31:24 -0800845 /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
846 * linked with the rest of the tree.
847 * conversely, new_eb_bhs[0] is the new bottommost leaf.
848 *
849 * when we leave the loop, new_last_eb_blk will point to the
850 * newest leaf, and next_blkno will point to the topmost extent
851 * block. */
852 next_blkno = new_last_eb_blk = 0;
853 for(i = 0; i < new_blocks; i++) {
854 bh = new_eb_bhs[i];
855 eb = (struct ocfs2_extent_block *) bh->b_data;
856 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
857 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
858 status = -EIO;
859 goto bail;
860 }
861 eb_el = &eb->h_list;
862
863 status = ocfs2_journal_access(handle, inode, bh,
864 OCFS2_JOURNAL_ACCESS_CREATE);
865 if (status < 0) {
866 mlog_errno(status);
867 goto bail;
868 }
869
870 eb->h_next_leaf_blk = 0;
871 eb_el->l_tree_depth = cpu_to_le16(i);
872 eb_el->l_next_free_rec = cpu_to_le16(1);
Mark Fashehdcd05382007-01-16 11:32:23 -0800873 /*
874 * This actually counts as an empty extent as
875 * c_clusters == 0
876 */
877 eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
Mark Fashehccd979b2005-12-15 14:31:24 -0800878 eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
Mark Fashehe48edee2007-03-07 16:46:57 -0800879 /*
880 * eb_el isn't always an interior node, but even leaf
881 * nodes want a zero'd flags and reserved field so
882 * this gets the whole 32 bits regardless of use.
883 */
884 eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
Mark Fashehccd979b2005-12-15 14:31:24 -0800885 if (!eb_el->l_tree_depth)
886 new_last_eb_blk = le64_to_cpu(eb->h_blkno);
887
888 status = ocfs2_journal_dirty(handle, bh);
889 if (status < 0) {
890 mlog_errno(status);
891 goto bail;
892 }
893
894 next_blkno = le64_to_cpu(eb->h_blkno);
895 }
896
897 /* This is a bit hairy. We want to update up to three blocks
898 * here without leaving any of them in an inconsistent state
899 * in case of error. We don't have to worry about
900 * journal_dirty erroring as it won't unless we've aborted the
901 * handle (in which case we would never be here) so reserving
902 * the write with journal_access is all we need to do. */
Mark Fasheh328d5752007-06-18 10:48:04 -0700903 status = ocfs2_journal_access(handle, inode, *last_eb_bh,
Mark Fashehccd979b2005-12-15 14:31:24 -0800904 OCFS2_JOURNAL_ACCESS_WRITE);
905 if (status < 0) {
906 mlog_errno(status);
907 goto bail;
908 }
Joel Beckerce1d9ea2008-08-20 16:30:07 -0700909 status = ocfs2_journal_access(handle, inode, et->et_root_bh,
Mark Fashehccd979b2005-12-15 14:31:24 -0800910 OCFS2_JOURNAL_ACCESS_WRITE);
911 if (status < 0) {
912 mlog_errno(status);
913 goto bail;
914 }
915 if (eb_bh) {
916 status = ocfs2_journal_access(handle, inode, eb_bh,
917 OCFS2_JOURNAL_ACCESS_WRITE);
918 if (status < 0) {
919 mlog_errno(status);
920 goto bail;
921 }
922 }
923
924 /* Link the new branch into the rest of the tree (el will
Tao Mae7d4cb62008-08-18 17:38:44 +0800925 * either be on the root_bh, or the extent block passed in. */
Mark Fashehccd979b2005-12-15 14:31:24 -0800926 i = le16_to_cpu(el->l_next_free_rec);
927 el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
Mark Fashehdcd05382007-01-16 11:32:23 -0800928 el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -0800929 el->l_recs[i].e_int_clusters = 0;
Mark Fashehccd979b2005-12-15 14:31:24 -0800930 le16_add_cpu(&el->l_next_free_rec, 1);
931
932 /* fe needs a new last extent block pointer, as does the
933 * next_leaf on the previously last-extent-block. */
Joel Becker35dc0aa2008-08-20 16:25:06 -0700934 ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);
Mark Fashehccd979b2005-12-15 14:31:24 -0800935
Mark Fasheh328d5752007-06-18 10:48:04 -0700936 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
Mark Fashehccd979b2005-12-15 14:31:24 -0800937 eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
938
Mark Fasheh328d5752007-06-18 10:48:04 -0700939 status = ocfs2_journal_dirty(handle, *last_eb_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -0800940 if (status < 0)
941 mlog_errno(status);
Joel Beckerce1d9ea2008-08-20 16:30:07 -0700942 status = ocfs2_journal_dirty(handle, et->et_root_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -0800943 if (status < 0)
944 mlog_errno(status);
945 if (eb_bh) {
946 status = ocfs2_journal_dirty(handle, eb_bh);
947 if (status < 0)
948 mlog_errno(status);
949 }
950
Mark Fasheh328d5752007-06-18 10:48:04 -0700951 /*
952 * Some callers want to track the rightmost leaf so pass it
953 * back here.
954 */
955 brelse(*last_eb_bh);
956 get_bh(new_eb_bhs[0]);
957 *last_eb_bh = new_eb_bhs[0];
958
Mark Fashehccd979b2005-12-15 14:31:24 -0800959 status = 0;
960bail:
961 if (new_eb_bhs) {
962 for (i = 0; i < new_blocks; i++)
963 if (new_eb_bhs[i])
964 brelse(new_eb_bhs[i]);
965 kfree(new_eb_bhs);
966 }
967
968 mlog_exit(status);
969 return status;
970}
971
972/*
973 * adds another level to the allocation tree.
974 * returns back the new extent block so you can add a branch to it
975 * after this call.
976 */
977static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
Mark Fasheh1fabe142006-10-09 18:11:45 -0700978 handle_t *handle,
Mark Fashehccd979b2005-12-15 14:31:24 -0800979 struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +0800980 struct ocfs2_extent_tree *et,
Mark Fashehccd979b2005-12-15 14:31:24 -0800981 struct ocfs2_alloc_context *meta_ac,
982 struct buffer_head **ret_new_eb_bh)
983{
984 int status, i;
Mark Fashehdcd05382007-01-16 11:32:23 -0800985 u32 new_clusters;
Mark Fashehccd979b2005-12-15 14:31:24 -0800986 struct buffer_head *new_eb_bh = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -0800987 struct ocfs2_extent_block *eb;
Tao Mae7d4cb62008-08-18 17:38:44 +0800988 struct ocfs2_extent_list *root_el;
Mark Fashehccd979b2005-12-15 14:31:24 -0800989 struct ocfs2_extent_list *eb_el;
990
991 mlog_entry_void();
992
993 status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
994 &new_eb_bh);
995 if (status < 0) {
996 mlog_errno(status);
997 goto bail;
998 }
999
1000 eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
1001 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
1002 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
1003 status = -EIO;
1004 goto bail;
1005 }
1006
1007 eb_el = &eb->h_list;
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001008 root_el = et->et_root_el;
Mark Fashehccd979b2005-12-15 14:31:24 -08001009
1010 status = ocfs2_journal_access(handle, inode, new_eb_bh,
1011 OCFS2_JOURNAL_ACCESS_CREATE);
1012 if (status < 0) {
1013 mlog_errno(status);
1014 goto bail;
1015 }
1016
Tao Mae7d4cb62008-08-18 17:38:44 +08001017 /* copy the root extent list data into the new extent block */
1018 eb_el->l_tree_depth = root_el->l_tree_depth;
1019 eb_el->l_next_free_rec = root_el->l_next_free_rec;
1020 for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
1021 eb_el->l_recs[i] = root_el->l_recs[i];
Mark Fashehccd979b2005-12-15 14:31:24 -08001022
1023 status = ocfs2_journal_dirty(handle, new_eb_bh);
1024 if (status < 0) {
1025 mlog_errno(status);
1026 goto bail;
1027 }
1028
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001029 status = ocfs2_journal_access(handle, inode, et->et_root_bh,
Mark Fashehccd979b2005-12-15 14:31:24 -08001030 OCFS2_JOURNAL_ACCESS_WRITE);
1031 if (status < 0) {
1032 mlog_errno(status);
1033 goto bail;
1034 }
1035
Mark Fashehdcd05382007-01-16 11:32:23 -08001036 new_clusters = ocfs2_sum_rightmost_rec(eb_el);
1037
Tao Mae7d4cb62008-08-18 17:38:44 +08001038 /* update root_bh now */
1039 le16_add_cpu(&root_el->l_tree_depth, 1);
1040 root_el->l_recs[0].e_cpos = 0;
1041 root_el->l_recs[0].e_blkno = eb->h_blkno;
1042 root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
1043 for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
1044 memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
1045 root_el->l_next_free_rec = cpu_to_le16(1);
Mark Fashehccd979b2005-12-15 14:31:24 -08001046
1047 /* If this is our 1st tree depth shift, then last_eb_blk
1048 * becomes the allocated extent block */
Tao Mae7d4cb62008-08-18 17:38:44 +08001049 if (root_el->l_tree_depth == cpu_to_le16(1))
Joel Becker35dc0aa2008-08-20 16:25:06 -07001050 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
Mark Fashehccd979b2005-12-15 14:31:24 -08001051
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001052 status = ocfs2_journal_dirty(handle, et->et_root_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -08001053 if (status < 0) {
1054 mlog_errno(status);
1055 goto bail;
1056 }
1057
1058 *ret_new_eb_bh = new_eb_bh;
1059 new_eb_bh = NULL;
1060 status = 0;
1061bail:
1062 if (new_eb_bh)
1063 brelse(new_eb_bh);
1064
1065 mlog_exit(status);
1066 return status;
1067}
1068
1069/*
Mark Fashehccd979b2005-12-15 14:31:24 -08001070 * Should only be called when there is no space left in any of the
1071 * leaf nodes. What we want to do is find the lowest tree depth
1072 * non-leaf extent block with room for new records. There are three
1073 * valid results of this search:
1074 *
1075 * 1) a lowest extent block is found, then we pass it back in
1076 * *lowest_eb_bh and return '0'
1077 *
Tao Mae7d4cb62008-08-18 17:38:44 +08001078 * 2) the search fails to find anything, but the root_el has room. We
Mark Fashehccd979b2005-12-15 14:31:24 -08001079 * pass NULL back in *lowest_eb_bh, but still return '0'
1080 *
Tao Mae7d4cb62008-08-18 17:38:44 +08001081 * 3) the search fails to find anything AND the root_el is full, in
Mark Fashehccd979b2005-12-15 14:31:24 -08001082 * which case we return > 0
1083 *
1084 * return status < 0 indicates an error.
1085 */
1086static int ocfs2_find_branch_target(struct ocfs2_super *osb,
1087 struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +08001088 struct ocfs2_extent_tree *et,
Mark Fashehccd979b2005-12-15 14:31:24 -08001089 struct buffer_head **target_bh)
1090{
1091 int status = 0, i;
1092 u64 blkno;
Mark Fashehccd979b2005-12-15 14:31:24 -08001093 struct ocfs2_extent_block *eb;
1094 struct ocfs2_extent_list *el;
1095 struct buffer_head *bh = NULL;
1096 struct buffer_head *lowest_bh = NULL;
1097
1098 mlog_entry_void();
1099
1100 *target_bh = NULL;
1101
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001102 el = et->et_root_el;
Mark Fashehccd979b2005-12-15 14:31:24 -08001103
1104 while(le16_to_cpu(el->l_tree_depth) > 1) {
1105 if (le16_to_cpu(el->l_next_free_rec) == 0) {
Mark Fashehb06970532006-03-03 10:24:33 -08001106 ocfs2_error(inode->i_sb, "Dinode %llu has empty "
Mark Fashehccd979b2005-12-15 14:31:24 -08001107 "extent list (next_free_rec == 0)",
Mark Fashehb06970532006-03-03 10:24:33 -08001108 (unsigned long long)OCFS2_I(inode)->ip_blkno);
Mark Fashehccd979b2005-12-15 14:31:24 -08001109 status = -EIO;
1110 goto bail;
1111 }
1112 i = le16_to_cpu(el->l_next_free_rec) - 1;
1113 blkno = le64_to_cpu(el->l_recs[i].e_blkno);
1114 if (!blkno) {
Mark Fashehb06970532006-03-03 10:24:33 -08001115 ocfs2_error(inode->i_sb, "Dinode %llu has extent "
Mark Fashehccd979b2005-12-15 14:31:24 -08001116 "list where extent # %d has no physical "
1117 "block start",
Mark Fashehb06970532006-03-03 10:24:33 -08001118 (unsigned long long)OCFS2_I(inode)->ip_blkno, i);
Mark Fashehccd979b2005-12-15 14:31:24 -08001119 status = -EIO;
1120 goto bail;
1121 }
1122
1123 if (bh) {
1124 brelse(bh);
1125 bh = NULL;
1126 }
1127
1128 status = ocfs2_read_block(osb, blkno, &bh, OCFS2_BH_CACHED,
1129 inode);
1130 if (status < 0) {
1131 mlog_errno(status);
1132 goto bail;
1133 }
1134
1135 eb = (struct ocfs2_extent_block *) bh->b_data;
1136 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
1137 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
1138 status = -EIO;
1139 goto bail;
1140 }
1141 el = &eb->h_list;
1142
1143 if (le16_to_cpu(el->l_next_free_rec) <
1144 le16_to_cpu(el->l_count)) {
1145 if (lowest_bh)
1146 brelse(lowest_bh);
1147 lowest_bh = bh;
1148 get_bh(lowest_bh);
1149 }
1150 }
1151
1152 /* If we didn't find one and the fe doesn't have any room,
1153 * then return '1' */
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001154 el = et->et_root_el;
Tao Mae7d4cb62008-08-18 17:38:44 +08001155 if (!lowest_bh && (el->l_next_free_rec == el->l_count))
Mark Fashehccd979b2005-12-15 14:31:24 -08001156 status = 1;
1157
1158 *target_bh = lowest_bh;
1159bail:
1160 if (bh)
1161 brelse(bh);
1162
1163 mlog_exit(status);
1164 return status;
1165}
1166
Mark Fashehe48edee2007-03-07 16:46:57 -08001167/*
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001168 * Grow a b-tree so that it has more records.
1169 *
1170 * We might shift the tree depth in which case existing paths should
1171 * be considered invalid.
1172 *
1173 * Tree depth after the grow is returned via *final_depth.
Mark Fasheh328d5752007-06-18 10:48:04 -07001174 *
1175 * *last_eb_bh will be updated by ocfs2_add_branch().
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001176 */
1177static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08001178 struct ocfs2_extent_tree *et, int *final_depth,
Mark Fasheh328d5752007-06-18 10:48:04 -07001179 struct buffer_head **last_eb_bh,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001180 struct ocfs2_alloc_context *meta_ac)
1181{
1182 int ret, shift;
Joel Beckerce1d9ea2008-08-20 16:30:07 -07001183 struct ocfs2_extent_list *el = et->et_root_el;
Tao Mae7d4cb62008-08-18 17:38:44 +08001184 int depth = le16_to_cpu(el->l_tree_depth);
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001185 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1186 struct buffer_head *bh = NULL;
1187
1188 BUG_ON(meta_ac == NULL);
1189
Tao Mae7d4cb62008-08-18 17:38:44 +08001190 shift = ocfs2_find_branch_target(osb, inode, et, &bh);
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001191 if (shift < 0) {
1192 ret = shift;
1193 mlog_errno(ret);
1194 goto out;
1195 }
1196
1197 /* We traveled all the way to the bottom of the allocation tree
1198 * and didn't find room for any more extents - we need to add
1199 * another tree level */
1200 if (shift) {
1201 BUG_ON(bh);
1202 mlog(0, "need to shift tree depth (current = %d)\n", depth);
1203
1204 /* ocfs2_shift_tree_depth will return us a buffer with
1205 * the new extent block (so we can pass that to
1206 * ocfs2_add_branch). */
Tao Mae7d4cb62008-08-18 17:38:44 +08001207 ret = ocfs2_shift_tree_depth(osb, handle, inode, et,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001208 meta_ac, &bh);
1209 if (ret < 0) {
1210 mlog_errno(ret);
1211 goto out;
1212 }
1213 depth++;
Mark Fasheh328d5752007-06-18 10:48:04 -07001214 if (depth == 1) {
1215 /*
1216 * Special case: we have room now if we shifted from
1217 * tree_depth 0, so no more work needs to be done.
1218 *
1219 * We won't be calling add_branch, so pass
1220 * back *last_eb_bh as the new leaf. At depth
1221 * zero, it should always be null so there's
1222 * no reason to brelse.
1223 */
1224 BUG_ON(*last_eb_bh);
1225 get_bh(bh);
1226 *last_eb_bh = bh;
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001227 goto out;
Mark Fasheh328d5752007-06-18 10:48:04 -07001228 }
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001229 }
1230
1231 /* call ocfs2_add_branch to add the final part of the tree with
1232 * the new data. */
1233 mlog(0, "add branch. bh = %p\n", bh);
Tao Mae7d4cb62008-08-18 17:38:44 +08001234 ret = ocfs2_add_branch(osb, handle, inode, et, bh, last_eb_bh,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07001235 meta_ac);
1236 if (ret < 0) {
1237 mlog_errno(ret);
1238 goto out;
1239 }
1240
1241out:
1242 if (final_depth)
1243 *final_depth = depth;
1244 brelse(bh);
1245 return ret;
1246}
1247
1248/*
Mark Fashehdcd05382007-01-16 11:32:23 -08001249 * This function will discard the rightmost extent record.
1250 */
1251static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
1252{
1253 int next_free = le16_to_cpu(el->l_next_free_rec);
1254 int count = le16_to_cpu(el->l_count);
1255 unsigned int num_bytes;
1256
1257 BUG_ON(!next_free);
1258 /* This will cause us to go off the end of our extent list. */
1259 BUG_ON(next_free >= count);
1260
1261 num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;
1262
1263 memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
1264}
1265
1266static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
1267 struct ocfs2_extent_rec *insert_rec)
1268{
1269 int i, insert_index, next_free, has_empty, num_bytes;
1270 u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
1271 struct ocfs2_extent_rec *rec;
1272
1273 next_free = le16_to_cpu(el->l_next_free_rec);
1274 has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);
1275
1276 BUG_ON(!next_free);
1277
1278 /* The tree code before us didn't allow enough room in the leaf. */
Julia Lawallb1f35502008-03-04 15:21:05 -08001279 BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);
Mark Fashehdcd05382007-01-16 11:32:23 -08001280
1281 /*
1282 * The easiest way to approach this is to just remove the
1283 * empty extent and temporarily decrement next_free.
1284 */
1285 if (has_empty) {
1286 /*
1287 * If next_free was 1 (only an empty extent), this
1288 * loop won't execute, which is fine. We still want
1289 * the decrement above to happen.
1290 */
1291 for(i = 0; i < (next_free - 1); i++)
1292 el->l_recs[i] = el->l_recs[i+1];
1293
1294 next_free--;
1295 }
1296
1297 /*
1298 * Figure out what the new record index should be.
1299 */
1300 for(i = 0; i < next_free; i++) {
1301 rec = &el->l_recs[i];
1302
1303 if (insert_cpos < le32_to_cpu(rec->e_cpos))
1304 break;
1305 }
1306 insert_index = i;
1307
1308 mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
1309 insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));
1310
1311 BUG_ON(insert_index < 0);
1312 BUG_ON(insert_index >= le16_to_cpu(el->l_count));
1313 BUG_ON(insert_index > next_free);
1314
1315 /*
1316 * No need to memmove if we're just adding to the tail.
1317 */
1318 if (insert_index != next_free) {
1319 BUG_ON(next_free >= le16_to_cpu(el->l_count));
1320
1321 num_bytes = next_free - insert_index;
1322 num_bytes *= sizeof(struct ocfs2_extent_rec);
1323 memmove(&el->l_recs[insert_index + 1],
1324 &el->l_recs[insert_index],
1325 num_bytes);
1326 }
1327
1328 /*
1329 * Either we had an empty extent, and need to re-increment or
1330 * there was no empty extent on a non full rightmost leaf node,
1331 * in which case we still need to increment.
1332 */
1333 next_free++;
1334 el->l_next_free_rec = cpu_to_le16(next_free);
1335 /*
1336 * Make sure none of the math above just messed up our tree.
1337 */
1338 BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));
1339
1340 el->l_recs[insert_index] = *insert_rec;
1341
1342}
1343
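/*
 * Worked example (added for clarity): with l_recs = [empty, (8,4), (16,4)]
 * and next_free = 3, rotating in a record at cpos 12 first drops the empty
 * extent (next_free becomes 2), finds insert_index = 1 (since 12 < 16),
 * moves (16,4) one slot to the right, stores the new record at index 1 and
 * bumps next_free back to 3, giving [(8,4), (12,4), (16,4)].
 */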
Mark Fasheh328d5752007-06-18 10:48:04 -07001344static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
1345{
1346 int size, num_recs = le16_to_cpu(el->l_next_free_rec);
1347
1348 BUG_ON(num_recs == 0);
1349
1350 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
1351 num_recs--;
1352 size = num_recs * sizeof(struct ocfs2_extent_rec);
1353 memmove(&el->l_recs[0], &el->l_recs[1], size);
1354 memset(&el->l_recs[num_recs], 0,
1355 sizeof(struct ocfs2_extent_rec));
1356 el->l_next_free_rec = cpu_to_le16(num_recs);
1357 }
1358}
1359
Mark Fashehdcd05382007-01-16 11:32:23 -08001360/*
1361 * Create an empty extent record.
1362 *
1363 * l_next_free_rec may be updated.
1364 *
1365 * If an empty extent already exists do nothing.
1366 */
1367static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
1368{
1369 int next_free = le16_to_cpu(el->l_next_free_rec);
1370
Mark Fashehe48edee2007-03-07 16:46:57 -08001371 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
1372
Mark Fashehdcd05382007-01-16 11:32:23 -08001373 if (next_free == 0)
1374 goto set_and_inc;
1375
1376 if (ocfs2_is_empty_extent(&el->l_recs[0]))
1377 return;
1378
1379 mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
1380 "Asked to create an empty extent in a full list:\n"
1381 "count = %u, tree depth = %u",
1382 le16_to_cpu(el->l_count),
1383 le16_to_cpu(el->l_tree_depth));
1384
1385 ocfs2_shift_records_right(el);
1386
1387set_and_inc:
1388 le16_add_cpu(&el->l_next_free_rec, 1);
1389 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
1390}
1391
1392/*
1393 * For a rotation which involves two leaf nodes, the "root node" is
1394 * the lowest level tree node which contains a path to both leafs. This
1395 * resulting set of information can be used to form a complete "subtree"
1396 *
1397 * This function is passed two full paths from the dinode down to a
1398 * pair of adjacent leaves. Its task is to figure out which path
1399 * index contains the subtree root - this can be the root index itself
1400 * in a worst-case rotation.
1401 *
1402 * The array index of the subtree root is passed back.
1403 */
1404static int ocfs2_find_subtree_root(struct inode *inode,
1405 struct ocfs2_path *left,
1406 struct ocfs2_path *right)
1407{
1408 int i = 0;
1409
1410 /*
1411 * Check that the caller passed in two paths from the same tree.
1412 */
1413 BUG_ON(path_root_bh(left) != path_root_bh(right));
1414
1415 do {
1416 i++;
1417
1418 /*
1419 * The caller didn't pass two adjacent paths.
1420 */
1421 mlog_bug_on_msg(i > left->p_tree_depth,
1422 "Inode %lu, left depth %u, right depth %u\n"
1423 "left leaf blk %llu, right leaf blk %llu\n",
1424 inode->i_ino, left->p_tree_depth,
1425 right->p_tree_depth,
1426 (unsigned long long)path_leaf_bh(left)->b_blocknr,
1427 (unsigned long long)path_leaf_bh(right)->b_blocknr);
1428 } while (left->p_node[i].bh->b_blocknr ==
1429 right->p_node[i].bh->b_blocknr);
1430
1431 return i - 1;
1432}
1433
1434typedef void (path_insert_t)(void *, struct buffer_head *);
1435
1436/*
1437 * Traverse a btree path in search of cpos, starting at root_el.
1438 *
1439 * This code can be called with a cpos larger than the tree, in which
1440 * case it will return the rightmost path.
1441 */
1442static int __ocfs2_find_path(struct inode *inode,
1443 struct ocfs2_extent_list *root_el, u32 cpos,
1444 path_insert_t *func, void *data)
1445{
1446 int i, ret = 0;
1447 u32 range;
1448 u64 blkno;
1449 struct buffer_head *bh = NULL;
1450 struct ocfs2_extent_block *eb;
1451 struct ocfs2_extent_list *el;
1452 struct ocfs2_extent_rec *rec;
1453 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1454
1455 el = root_el;
1456 while (el->l_tree_depth) {
1457 if (le16_to_cpu(el->l_next_free_rec) == 0) {
1458 ocfs2_error(inode->i_sb,
1459 "Inode %llu has empty extent list at "
1460 "depth %u\n",
1461 (unsigned long long)oi->ip_blkno,
1462 le16_to_cpu(el->l_tree_depth));
1463 ret = -EROFS;
1464 goto out;
1465
1466 }
1467
1468 for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
1469 rec = &el->l_recs[i];
1470
1471 /*
1472 * In the case that cpos is off the allocation
1473 * tree, this should just wind up returning the
1474 * rightmost record.
1475 */
1476 range = le32_to_cpu(rec->e_cpos) +
Mark Fashehe48edee2007-03-07 16:46:57 -08001477 ocfs2_rec_clusters(el, rec);
Mark Fashehdcd05382007-01-16 11:32:23 -08001478 if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
1479 break;
1480 }
1481
1482 blkno = le64_to_cpu(el->l_recs[i].e_blkno);
1483 if (blkno == 0) {
1484 ocfs2_error(inode->i_sb,
1485 "Inode %llu has bad blkno in extent list "
1486 "at depth %u (index %d)\n",
1487 (unsigned long long)oi->ip_blkno,
1488 le16_to_cpu(el->l_tree_depth), i);
1489 ret = -EROFS;
1490 goto out;
1491 }
1492
1493 brelse(bh);
1494 bh = NULL;
1495 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno,
1496 &bh, OCFS2_BH_CACHED, inode);
1497 if (ret) {
1498 mlog_errno(ret);
1499 goto out;
1500 }
1501
1502 eb = (struct ocfs2_extent_block *) bh->b_data;
1503 el = &eb->h_list;
1504 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
1505 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
1506 ret = -EIO;
1507 goto out;
1508 }
1509
1510 if (le16_to_cpu(el->l_next_free_rec) >
1511 le16_to_cpu(el->l_count)) {
1512 ocfs2_error(inode->i_sb,
1513 "Inode %llu has bad count in extent list "
1514 "at block %llu (next free=%u, count=%u)\n",
1515 (unsigned long long)oi->ip_blkno,
1516 (unsigned long long)bh->b_blocknr,
1517 le16_to_cpu(el->l_next_free_rec),
1518 le16_to_cpu(el->l_count));
1519 ret = -EROFS;
1520 goto out;
1521 }
1522
1523 if (func)
1524 func(data, bh);
1525 }
1526
1527out:
1528 /*
1529 * Catch any trailing bh that the loop didn't handle.
1530 */
1531 brelse(bh);
1532
1533 return ret;
1534}
1535
1536/*
1537 * Given an initialized path (that is, it has a valid root extent
1538 * list), this function will traverse the btree in search of the path
1539 * which would contain cpos.
1540 *
1541 * The path traveled is recorded in the path structure.
1542 *
1543 * Note that this will not do any comparisons on leaf node extent
1544 * records, so it will work fine in the case that we just added a tree
1545 * branch.
1546 */
1547struct find_path_data {
1548 int index;
1549 struct ocfs2_path *path;
1550};
1551static void find_path_ins(void *data, struct buffer_head *bh)
1552{
1553 struct find_path_data *fp = data;
1554
1555 get_bh(bh);
1556 ocfs2_path_insert_eb(fp->path, fp->index, bh);
1557 fp->index++;
1558}
1559static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
1560 u32 cpos)
1561{
1562 struct find_path_data data;
1563
1564 data.index = 1;
1565 data.path = path;
1566 return __ocfs2_find_path(inode, path_root_el(path), cpos,
1567 find_path_ins, &data);
1568}
1569
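/*
 * Illustrative sketch (an editorial addition): how the path helpers above
 * are typically combined.  'et_root_bh' and 'root_el' are assumed to
 * describe the root of the tree being searched.
 *
 *	struct ocfs2_path *path;
 *	struct ocfs2_extent_list *leaf_el;
 *
 *	path = ocfs2_new_path(et_root_bh, root_el);
 *	if (!path)
 *		return -ENOMEM;
 *	ret = ocfs2_find_path(inode, path, cpos);
 *	if (ret == 0)
 *		leaf_el = path_leaf_el(path);
 *	ocfs2_free_path(path);
 *
 * ocfs2_new_path() takes its own reference on the root buffer_head, and
 * ocfs2_free_path() drops every buffer_head reference held in the path.
 */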
1570static void find_leaf_ins(void *data, struct buffer_head *bh)
1571{
1572 struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data;
1573 struct ocfs2_extent_list *el = &eb->h_list;
1574 struct buffer_head **ret = data;
1575
1576 /* We want to retain only the leaf block. */
1577 if (le16_to_cpu(el->l_tree_depth) == 0) {
1578 get_bh(bh);
1579 *ret = bh;
1580 }
1581}
1582/*
1583 * Find the leaf block in the tree which would contain cpos. No
1584 * checking of the actual leaf is done.
1585 *
1586 * Some paths want to call this instead of allocating a path structure
1587 * and calling ocfs2_find_path().
1588 *
1589 * This function doesn't handle non btree extent lists.
1590 */
Mark Fasheh363041a2007-01-17 12:31:35 -08001591int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
1592 u32 cpos, struct buffer_head **leaf_bh)
Mark Fashehdcd05382007-01-16 11:32:23 -08001593{
1594 int ret;
1595 struct buffer_head *bh = NULL;
1596
1597 ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh);
1598 if (ret) {
1599 mlog_errno(ret);
1600 goto out;
1601 }
1602
1603 *leaf_bh = bh;
1604out:
1605 return ret;
1606}
1607
1608/*
1609 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
1610 *
1611 * Basically, we've moved records around at the bottom of the tree and
1612 * need to fix up the extent records above the change so that they
1613 * reflect the new layout.
1614 *
1615 * left_rec: the record on the left.
1616 * left_child_el: the child list pointed to by left_rec
1617 * right_rec: the record to the right of left_rec
1618 * right_child_el: the child list pointed to by right_rec
1619 *
1620 * By definition, this only works on interior nodes.
1621 */
1622static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
1623 struct ocfs2_extent_list *left_child_el,
1624 struct ocfs2_extent_rec *right_rec,
1625 struct ocfs2_extent_list *right_child_el)
1626{
1627 u32 left_clusters, right_end;
1628
1629 /*
1630 * Interior nodes never have holes. Their cpos is the cpos of
1631 * the leftmost record in their child list. Their cluster
1632 * count covers the full theoretical range of their child list
1633 * - the range between their cpos and the cpos of the record
1634 * immediately to their right.
1635 */
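	/*
	 * Worked example (hypothetical numbers): if left_rec starts at
	 * cpos 100 and the first real record in right_child_el starts
	 * at cpos 164, then left_rec must cover 164 - 100 = 64 clusters,
	 * no matter how many of those clusters the child list actually
	 * has allocated.
	 */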
1636 left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
Mark Fasheh328d5752007-06-18 10:48:04 -07001637 if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) {
1638 BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
1639 left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
1640 }
Mark Fashehdcd05382007-01-16 11:32:23 -08001641 left_clusters -= le32_to_cpu(left_rec->e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001642 left_rec->e_int_clusters = cpu_to_le32(left_clusters);
Mark Fashehdcd05382007-01-16 11:32:23 -08001643
1644 /*
1645 * Calculate the rightmost cluster count boundary before
Mark Fashehe48edee2007-03-07 16:46:57 -08001646 * moving cpos - we will need to adjust clusters after
Mark Fashehdcd05382007-01-16 11:32:23 -08001647 * updating e_cpos to keep the same highest cluster count.
1648 */
1649 right_end = le32_to_cpu(right_rec->e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001650 right_end += le32_to_cpu(right_rec->e_int_clusters);
Mark Fashehdcd05382007-01-16 11:32:23 -08001651
1652 right_rec->e_cpos = left_rec->e_cpos;
1653 le32_add_cpu(&right_rec->e_cpos, left_clusters);
1654
1655 right_end -= le32_to_cpu(right_rec->e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001656 right_rec->e_int_clusters = cpu_to_le32(right_end);
Mark Fashehdcd05382007-01-16 11:32:23 -08001657}
1658
1659/*
1660 * Adjust the adjacent root node records involved in a
1661 * rotation. left_el_blkno is passed in as a key so that we can easily
1662 * find it's index in the root list.
1663 */
1664static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
1665 struct ocfs2_extent_list *left_el,
1666 struct ocfs2_extent_list *right_el,
1667 u64 left_el_blkno)
1668{
1669 int i;
1670
1671 BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
1672 le16_to_cpu(left_el->l_tree_depth));
1673
1674 for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
1675 if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
1676 break;
1677 }
1678
1679 /*
1680 * The path walking code should have never returned a root and
1681 * two paths which are not adjacent.
1682 */
1683 BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));
1684
1685 ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
1686 &root_el->l_recs[i + 1], right_el);
1687}
1688
1689/*
1690 * We've changed a leaf block (in right_path) and need to reflect that
1691 * change back up the subtree.
1692 *
1693 * This happens in multiple places:
1694 * - When we've moved an extent record from the left path leaf to the right
1695 * path leaf to make room for an empty extent in the left path leaf.
1696 * - When our insert into the right path leaf is at the leftmost edge
1697 * and requires an update of the path immediately to its left. This
1698 * can occur at the end of some types of rotation and appending inserts.
Tao Ma677b9752008-01-30 14:21:05 +08001699 * - When we've adjusted the last extent record in the left path leaf and the
1700 * 1st extent record in the right path leaf during cross extent block merge.
Mark Fashehdcd05382007-01-16 11:32:23 -08001701 */
1702static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
1703 struct ocfs2_path *left_path,
1704 struct ocfs2_path *right_path,
1705 int subtree_index)
1706{
1707 int ret, i, idx;
1708 struct ocfs2_extent_list *el, *left_el, *right_el;
1709 struct ocfs2_extent_rec *left_rec, *right_rec;
1710 struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
1711
1712 /*
1713 * Update the counts and position values within all the
1714 * interior nodes to reflect the leaf rotation we just did.
1715 *
1716 * The root node is handled below the loop.
1717 *
1718 * We begin the loop with right_el and left_el pointing to the
1719 * leaf lists and work our way up.
1720 *
1721 * NOTE: within this loop, left_el and right_el always refer
1722 * to the *child* lists.
1723 */
1724 left_el = path_leaf_el(left_path);
1725 right_el = path_leaf_el(right_path);
1726 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
1727 mlog(0, "Adjust records at index %u\n", i);
1728
1729 /*
1730 * One nice property of knowing that all of these
1731 * nodes are below the root is that we only deal with
1732 * the leftmost right node record and the rightmost
1733 * left node record.
1734 */
1735 el = left_path->p_node[i].el;
1736 idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
1737 left_rec = &el->l_recs[idx];
1738
1739 el = right_path->p_node[i].el;
1740 right_rec = &el->l_recs[0];
1741
1742 ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
1743 right_el);
1744
1745 ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
1746 if (ret)
1747 mlog_errno(ret);
1748
1749 ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
1750 if (ret)
1751 mlog_errno(ret);
1752
1753 /*
1754 * Setup our list pointers now so that the current
1755 * parents become children in the next iteration.
1756 */
1757 left_el = left_path->p_node[i].el;
1758 right_el = right_path->p_node[i].el;
1759 }
1760
1761 /*
1762 * At the root node, adjust the two adjacent records which
1763 * begin our path to the leaves.
1764 */
1765
1766 el = left_path->p_node[subtree_index].el;
1767 left_el = left_path->p_node[subtree_index + 1].el;
1768 right_el = right_path->p_node[subtree_index + 1].el;
1769
1770 ocfs2_adjust_root_records(el, left_el, right_el,
1771 left_path->p_node[subtree_index + 1].bh->b_blocknr);
1772
1773 root_bh = left_path->p_node[subtree_index].bh;
1774
1775 ret = ocfs2_journal_dirty(handle, root_bh);
1776 if (ret)
1777 mlog_errno(ret);
1778}
1779
1780static int ocfs2_rotate_subtree_right(struct inode *inode,
1781 handle_t *handle,
1782 struct ocfs2_path *left_path,
1783 struct ocfs2_path *right_path,
1784 int subtree_index)
1785{
1786 int ret, i;
1787 struct buffer_head *right_leaf_bh;
1788 struct buffer_head *left_leaf_bh = NULL;
1789 struct buffer_head *root_bh;
1790 struct ocfs2_extent_list *right_el, *left_el;
1791 struct ocfs2_extent_rec move_rec;
1792
1793 left_leaf_bh = path_leaf_bh(left_path);
1794 left_el = path_leaf_el(left_path);
1795
1796 if (left_el->l_next_free_rec != left_el->l_count) {
1797 ocfs2_error(inode->i_sb,
1798 "Inode %llu has non-full interior leaf node %llu"
1799 "(next free = %u)",
1800 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1801 (unsigned long long)left_leaf_bh->b_blocknr,
1802 le16_to_cpu(left_el->l_next_free_rec));
1803 return -EROFS;
1804 }
1805
1806 /*
1807 * This extent block may already have an empty record, so we
1808 * return early if so.
1809 */
1810 if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
1811 return 0;
1812
1813 root_bh = left_path->p_node[subtree_index].bh;
1814 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
1815
1816 ret = ocfs2_journal_access(handle, inode, root_bh,
1817 OCFS2_JOURNAL_ACCESS_WRITE);
1818 if (ret) {
1819 mlog_errno(ret);
1820 goto out;
1821 }
1822
1823 for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
1824 ret = ocfs2_journal_access(handle, inode,
1825 right_path->p_node[i].bh,
1826 OCFS2_JOURNAL_ACCESS_WRITE);
1827 if (ret) {
1828 mlog_errno(ret);
1829 goto out;
1830 }
1831
1832 ret = ocfs2_journal_access(handle, inode,
1833 left_path->p_node[i].bh,
1834 OCFS2_JOURNAL_ACCESS_WRITE);
1835 if (ret) {
1836 mlog_errno(ret);
1837 goto out;
1838 }
1839 }
1840
1841 right_leaf_bh = path_leaf_bh(right_path);
1842 right_el = path_leaf_el(right_path);
1843
1844 /* This is a code error, not a disk corruption. */
1845 mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
1846 "because rightmost leaf block %llu is empty\n",
1847 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1848 (unsigned long long)right_leaf_bh->b_blocknr);
1849
1850 ocfs2_create_empty_extent(right_el);
1851
1852 ret = ocfs2_journal_dirty(handle, right_leaf_bh);
1853 if (ret) {
1854 mlog_errno(ret);
1855 goto out;
1856 }
1857
1858 /* Do the copy now. */
1859 i = le16_to_cpu(left_el->l_next_free_rec) - 1;
1860 move_rec = left_el->l_recs[i];
1861 right_el->l_recs[0] = move_rec;
1862
1863 /*
1864 * Clear out the record we just copied and shift everything
1865 * over, leaving an empty extent in the left leaf.
1866 *
1867 * We temporarily subtract from next_free_rec so that the
1868 * shift will lose the tail record (which is now defunct).
1869 */
1870 le16_add_cpu(&left_el->l_next_free_rec, -1);
1871 ocfs2_shift_records_right(left_el);
1872 memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
1873 le16_add_cpu(&left_el->l_next_free_rec, 1);
1874
1875 ret = ocfs2_journal_dirty(handle, left_leaf_bh);
1876 if (ret) {
1877 mlog_errno(ret);
1878 goto out;
1879 }
1880
1881 ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
1882 subtree_index);
1883
1884out:
1885 return ret;
1886}
1887
1888/*
1889 * Given a full path, determine what cpos value would return us a path
1890 * containing the leaf immediately to the left of the current one.
1891 *
1892 * Will return zero if the path passed in is already the leftmost path.
1893 */
1894static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
1895 struct ocfs2_path *path, u32 *cpos)
1896{
1897 int i, j, ret = 0;
1898 u64 blkno;
1899 struct ocfs2_extent_list *el;
1900
Mark Fashehe48edee2007-03-07 16:46:57 -08001901 BUG_ON(path->p_tree_depth == 0);
1902
Mark Fashehdcd05382007-01-16 11:32:23 -08001903 *cpos = 0;
1904
1905 blkno = path_leaf_bh(path)->b_blocknr;
1906
1907 /* Start at the tree node just above the leaf and work our way up. */
1908 i = path->p_tree_depth - 1;
1909 while (i >= 0) {
1910 el = path->p_node[i].el;
1911
1912 /*
1913 * Find the extent record just before the one in our
1914 * path.
1915 */
1916 for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
1917 if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
1918 if (j == 0) {
1919 if (i == 0) {
1920 /*
1921 * We've determined that the
1922 * path specified is already
1923 * the leftmost one - return a
1924 * cpos of zero.
1925 */
1926 goto out;
1927 }
1928 /*
1929 * The leftmost record points to our
1930 * leaf - we need to travel up the
1931 * tree one level.
1932 */
1933 goto next_node;
1934 }
1935
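				/*
				 * The record just before ours ends the
				 * range of the leaf to our left, so its
				 * last cluster (cpos + clusters - 1) is
				 * guaranteed to land in that leaf and
				 * can be used as the lookup cpos.  E.g.
				 * (hypothetical), a previous record at
				 * cpos 64 covering 16 clusters gives a
				 * lookup cpos of 79.
				 */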
1936 *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -08001937 *cpos = *cpos + ocfs2_rec_clusters(el,
1938 &el->l_recs[j - 1]);
1939 *cpos = *cpos - 1;
Mark Fashehdcd05382007-01-16 11:32:23 -08001940 goto out;
1941 }
1942 }
1943
1944 /*
1945 * If we got here, we never found a valid node where
1946 * the tree indicated one should be.
1947 */
1948 ocfs2_error(sb,
1949 "Invalid extent tree at extent block %llu\n",
1950 (unsigned long long)blkno);
1951 ret = -EROFS;
1952 goto out;
1953
1954next_node:
1955 blkno = path->p_node[i].bh->b_blocknr;
1956 i--;
1957 }
1958
1959out:
1960 return ret;
1961}
1962
Mark Fasheh328d5752007-06-18 10:48:04 -07001963/*
1964 * Extend the transaction by enough credits to complete the rotation,
1965 * and still leave at least the original number of credits allocated
1966 * to this transaction.
1967 */
Mark Fashehdcd05382007-01-16 11:32:23 -08001968static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
Mark Fasheh328d5752007-06-18 10:48:04 -07001969 int op_credits,
Mark Fashehdcd05382007-01-16 11:32:23 -08001970 struct ocfs2_path *path)
1971{
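	/*
	 * A left and a right buffer may be dirtied at every level below
	 * the subtree root, plus one buffer for the subtree root itself,
	 * plus whatever credits the original operation still needs.
	 */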
Mark Fasheh328d5752007-06-18 10:48:04 -07001972 int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
Mark Fashehdcd05382007-01-16 11:32:23 -08001973
1974 if (handle->h_buffer_credits < credits)
1975 return ocfs2_extend_trans(handle, credits);
1976
1977 return 0;
1978}
1979
1980/*
1981 * Trap the case where we're inserting into the theoretical range past
1982 * the _actual_ left leaf range. Otherwise, we'll rotate a record
1983 * whose cpos is less than ours into the right leaf.
1984 *
1985 * It's only necessary to look at the rightmost record of the left
1986 * leaf because the logic that calls us should ensure that the
1987 * theoretical ranges in the path components above the leaves are
1988 * correct.
1989 */
1990static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
1991 u32 insert_cpos)
1992{
1993 struct ocfs2_extent_list *left_el;
1994 struct ocfs2_extent_rec *rec;
1995 int next_free;
1996
1997 left_el = path_leaf_el(left_path);
1998 next_free = le16_to_cpu(left_el->l_next_free_rec);
1999 rec = &left_el->l_recs[next_free - 1];
2000
2001 if (insert_cpos > le32_to_cpu(rec->e_cpos))
2002 return 1;
2003 return 0;
2004}
2005
Mark Fasheh328d5752007-06-18 10:48:04 -07002006static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
2007{
2008 int next_free = le16_to_cpu(el->l_next_free_rec);
2009 unsigned int range;
2010 struct ocfs2_extent_rec *rec;
2011
2012 if (next_free == 0)
2013 return 0;
2014
2015 rec = &el->l_recs[0];
2016 if (ocfs2_is_empty_extent(rec)) {
2017 /* Empty list. */
2018 if (next_free == 1)
2019 return 0;
2020 rec = &el->l_recs[1];
2021 }
2022
2023 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
2024 if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
2025 return 1;
2026 return 0;
2027}
2028
Mark Fashehdcd05382007-01-16 11:32:23 -08002029/*
2030 * Rotate all the records in a btree right one record, starting at insert_cpos.
2031 *
2032 * The path to the rightmost leaf should be passed in.
2033 *
2034 * The array is assumed to be large enough to hold an entire path (tree depth).
2035 *
2036 * Upon successful return from this function:
2037 *
2038 * - The 'right_path' array will contain a path to the leaf block
2039 * whose range contains e_cpos.
2040 * - That leaf block will have a single empty extent in list index 0.
2041 * - In the case that the rotation requires a post-insert update,
2042 * *ret_left_path will contain a valid path which can be passed to
2043 * ocfs2_insert_path().
2044 */
2045static int ocfs2_rotate_tree_right(struct inode *inode,
2046 handle_t *handle,
Mark Fasheh328d5752007-06-18 10:48:04 -07002047 enum ocfs2_split_type split,
Mark Fashehdcd05382007-01-16 11:32:23 -08002048 u32 insert_cpos,
2049 struct ocfs2_path *right_path,
2050 struct ocfs2_path **ret_left_path)
2051{
Mark Fasheh328d5752007-06-18 10:48:04 -07002052 int ret, start, orig_credits = handle->h_buffer_credits;
Mark Fashehdcd05382007-01-16 11:32:23 -08002053 u32 cpos;
2054 struct ocfs2_path *left_path = NULL;
2055
2056 *ret_left_path = NULL;
2057
2058 left_path = ocfs2_new_path(path_root_bh(right_path),
2059 path_root_el(right_path));
2060 if (!left_path) {
2061 ret = -ENOMEM;
2062 mlog_errno(ret);
2063 goto out;
2064 }
2065
2066 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos);
2067 if (ret) {
2068 mlog_errno(ret);
2069 goto out;
2070 }
2071
2072 mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos);
2073
2074 /*
2075 * What we want to do here is:
2076 *
2077 * 1) Start with the rightmost path.
2078 *
2079 * 2) Determine a path to the leaf block directly to the left
2080 * of that leaf.
2081 *
2082 * 3) Determine the 'subtree root' - the lowest level tree node
2083 * which contains a path to both leaves.
2084 *
2085 * 4) Rotate the subtree.
2086 *
2087 * 5) Find the next subtree by considering the left path to be
2088 * the new right path.
2089 *
2090 * The check at the top of this while loop also accepts
2091 * insert_cpos == cpos because cpos is only a _theoretical_
2092 * value to get us the left path - insert_cpos might very well
2093 * be filling that hole.
2094 *
2095 * Stop at a cpos of '0' because we either started at the
2096 * leftmost branch (i.e., a tree with one branch and a
2097 * rotation inside of it), or we've gone as far as we can in
2098 * rotating subtrees.
2099 */
2100 while (cpos && insert_cpos <= cpos) {
2101 mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
2102 insert_cpos, cpos);
2103
2104 ret = ocfs2_find_path(inode, left_path, cpos);
2105 if (ret) {
2106 mlog_errno(ret);
2107 goto out;
2108 }
2109
2110 mlog_bug_on_msg(path_leaf_bh(left_path) ==
2111 path_leaf_bh(right_path),
2112 "Inode %lu: error during insert of %u "
2113 "(left path cpos %u) results in two identical "
2114 "paths ending at %llu\n",
2115 inode->i_ino, insert_cpos, cpos,
2116 (unsigned long long)
2117 path_leaf_bh(left_path)->b_blocknr);
2118
Mark Fasheh328d5752007-06-18 10:48:04 -07002119 if (split == SPLIT_NONE &&
2120 ocfs2_rotate_requires_path_adjustment(left_path,
Mark Fashehdcd05382007-01-16 11:32:23 -08002121 insert_cpos)) {
Mark Fashehdcd05382007-01-16 11:32:23 -08002122
2123 /*
2124 * We've rotated the tree as much as we
2125 * should. The rest is up to
2126 * ocfs2_insert_path() to complete, after the
2127 * record insertion. We indicate this
2128 * situation by returning the left path.
2129 *
2130 * The reason we don't adjust the records here
2131 * before the record insert is that an error
2132 * later might break the rule where a parent
2133 * record e_cpos will reflect the actual
2134 * e_cpos of the 1st nonempty record of the
2135 * child list.
2136 */
2137 *ret_left_path = left_path;
2138 goto out_ret_path;
2139 }
2140
2141 start = ocfs2_find_subtree_root(inode, left_path, right_path);
2142
2143 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
2144 start,
2145 (unsigned long long) right_path->p_node[start].bh->b_blocknr,
2146 right_path->p_tree_depth);
2147
2148 ret = ocfs2_extend_rotate_transaction(handle, start,
Mark Fasheh328d5752007-06-18 10:48:04 -07002149 orig_credits, right_path);
Mark Fashehdcd05382007-01-16 11:32:23 -08002150 if (ret) {
2151 mlog_errno(ret);
2152 goto out;
2153 }
2154
2155 ret = ocfs2_rotate_subtree_right(inode, handle, left_path,
2156 right_path, start);
2157 if (ret) {
2158 mlog_errno(ret);
2159 goto out;
2160 }
2161
Mark Fasheh328d5752007-06-18 10:48:04 -07002162 if (split != SPLIT_NONE &&
2163 ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
2164 insert_cpos)) {
2165 /*
2166 * A rotate moves the rightmost left leaf
2167 * record over to the leftmost right leaf
2168 * slot. If we're doing an extent split
2169 * instead of a real insert, then we have to
2170 * check that the extent to be split wasn't
2171 * just moved over. If it was, then we can
2172 * exit here, passing left_path back -
2173 * ocfs2_split_extent() is smart enough to
2174 * search both leaves.
2175 */
2176 *ret_left_path = left_path;
2177 goto out_ret_path;
2178 }
2179
Mark Fashehdcd05382007-01-16 11:32:23 -08002180 /*
2181 * There is no need to re-read the next right path
2182 * as we know that it'll be our current left
2183 * path. Optimize by copying values instead.
2184 */
2185 ocfs2_mv_path(right_path, left_path);
2186
2187 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
2188 &cpos);
2189 if (ret) {
2190 mlog_errno(ret);
2191 goto out;
2192 }
2193 }
2194
2195out:
2196 ocfs2_free_path(left_path);
2197
2198out_ret_path:
2199 return ret;
2200}
2201
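/*
 * After the rightmost leaf has changed, walk the rightmost path from the
 * root down and stretch the last record at each interior level so that
 * it ends exactly where the new rightmost leaf's range ends.
 */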
Mark Fasheh328d5752007-06-18 10:48:04 -07002202static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
2203 struct ocfs2_path *path)
2204{
2205 int i, idx;
2206 struct ocfs2_extent_rec *rec;
2207 struct ocfs2_extent_list *el;
2208 struct ocfs2_extent_block *eb;
2209 u32 range;
2210
2211 /* Path should always be rightmost. */
2212 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
2213 BUG_ON(eb->h_next_leaf_blk != 0ULL);
2214
2215 el = &eb->h_list;
2216 BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
2217 idx = le16_to_cpu(el->l_next_free_rec) - 1;
2218 rec = &el->l_recs[idx];
2219 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
2220
2221 for (i = 0; i < path->p_tree_depth; i++) {
2222 el = path->p_node[i].el;
2223 idx = le16_to_cpu(el->l_next_free_rec) - 1;
2224 rec = &el->l_recs[idx];
2225
2226 rec->e_int_clusters = cpu_to_le32(range);
2227 le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos));
2228
2229 ocfs2_journal_dirty(handle, path->p_node[i].bh);
2230 }
2231}
2232
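/*
 * Detach every extent block in 'path' from 'unlink_start' downward:
 * empty each block's record list, dirty the buffer, queue the block for
 * freeing via the dealloc context and drop it from the uptodate cache.
 * Blocks that unexpectedly still hold more than one record are logged
 * and dropped from the cache without being freed.
 */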
2233static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
2234 struct ocfs2_cached_dealloc_ctxt *dealloc,
2235 struct ocfs2_path *path, int unlink_start)
2236{
2237 int ret, i;
2238 struct ocfs2_extent_block *eb;
2239 struct ocfs2_extent_list *el;
2240 struct buffer_head *bh;
2241
2242 for(i = unlink_start; i < path_num_items(path); i++) {
2243 bh = path->p_node[i].bh;
2244
2245 eb = (struct ocfs2_extent_block *)bh->b_data;
2246 /*
2247 * Not all nodes might have had their final count
2248 * decremented by the caller - handle this here.
2249 */
2250 el = &eb->h_list;
2251 if (le16_to_cpu(el->l_next_free_rec) > 1) {
2252 mlog(ML_ERROR,
2253 "Inode %llu, attempted to remove extent block "
2254 "%llu with %u records\n",
2255 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2256 (unsigned long long)le64_to_cpu(eb->h_blkno),
2257 le16_to_cpu(el->l_next_free_rec));
2258
2259 ocfs2_journal_dirty(handle, bh);
2260 ocfs2_remove_from_cache(inode, bh);
2261 continue;
2262 }
2263
2264 el->l_next_free_rec = 0;
2265 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
2266
2267 ocfs2_journal_dirty(handle, bh);
2268
2269 ret = ocfs2_cache_extent_block_free(dealloc, eb);
2270 if (ret)
2271 mlog_errno(ret);
2272
2273 ocfs2_remove_from_cache(inode, bh);
2274 }
2275}
2276
2277static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle,
2278 struct ocfs2_path *left_path,
2279 struct ocfs2_path *right_path,
2280 int subtree_index,
2281 struct ocfs2_cached_dealloc_ctxt *dealloc)
2282{
2283 int i;
2284 struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
2285 struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
2286 struct ocfs2_extent_list *el;
2287 struct ocfs2_extent_block *eb;
2288
2289 el = path_leaf_el(left_path);
2290
2291 eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;
2292
2293 for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
2294 if (root_el->l_recs[i].e_blkno == eb->h_blkno)
2295 break;
2296
2297 BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec));
2298
2299 memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
2300 le16_add_cpu(&root_el->l_next_free_rec, -1);
2301
2302 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
2303 eb->h_next_leaf_blk = 0;
2304
2305 ocfs2_journal_dirty(handle, root_bh);
2306 ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
2307
2308 ocfs2_unlink_path(inode, handle, dealloc, right_path,
2309 subtree_index + 1);
2310}
2311
2312static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
2313 struct ocfs2_path *left_path,
2314 struct ocfs2_path *right_path,
2315 int subtree_index,
2316 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08002317 int *deleted,
2318 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002319{
2320 int ret, i, del_right_subtree = 0, right_has_empty = 0;
Tao Mae7d4cb62008-08-18 17:38:44 +08002321 struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
Mark Fasheh328d5752007-06-18 10:48:04 -07002322 struct ocfs2_extent_list *right_leaf_el, *left_leaf_el;
2323 struct ocfs2_extent_block *eb;
2324
2325 *deleted = 0;
2326
2327 right_leaf_el = path_leaf_el(right_path);
2328 left_leaf_el = path_leaf_el(left_path);
2329 root_bh = left_path->p_node[subtree_index].bh;
2330 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
2331
2332 if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0]))
2333 return 0;
2334
2335 eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data;
2336 if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) {
2337 /*
2338 * It's legal for us to proceed if the right leaf is
2339 * the rightmost one and it has an empty extent. There
2340 * are two cases to handle - whether the leaf will be
2341 * empty after removal or not. If the leaf isn't empty
2342 * then just remove the empty extent up front. The
2343 * next block will handle empty leaves by flagging
2344 * them for unlink.
2345 *
2346 * Non-rightmost leaves will return -EAGAIN and the
2347 * caller can manually move the subtree and retry.
2348 */
2349
2350 if (eb->h_next_leaf_blk != 0ULL)
2351 return -EAGAIN;
2352
2353 if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
2354 ret = ocfs2_journal_access(handle, inode,
2355 path_leaf_bh(right_path),
2356 OCFS2_JOURNAL_ACCESS_WRITE);
2357 if (ret) {
2358 mlog_errno(ret);
2359 goto out;
2360 }
2361
2362 ocfs2_remove_empty_extent(right_leaf_el);
2363 } else
2364 right_has_empty = 1;
2365 }
2366
2367 if (eb->h_next_leaf_blk == 0ULL &&
2368 le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) {
2369 /*
2370 * We have to update i_last_eb_blk during the meta
2371 * data delete.
2372 */
Tao Mae7d4cb62008-08-18 17:38:44 +08002373 ret = ocfs2_journal_access(handle, inode, et_root_bh,
Mark Fasheh328d5752007-06-18 10:48:04 -07002374 OCFS2_JOURNAL_ACCESS_WRITE);
2375 if (ret) {
2376 mlog_errno(ret);
2377 goto out;
2378 }
2379
2380 del_right_subtree = 1;
2381 }
2382
2383 /*
2384 * Getting here with an empty extent in the right path implies
2385 * that it's the rightmost path and will be deleted.
2386 */
2387 BUG_ON(right_has_empty && !del_right_subtree);
2388
2389 ret = ocfs2_journal_access(handle, inode, root_bh,
2390 OCFS2_JOURNAL_ACCESS_WRITE);
2391 if (ret) {
2392 mlog_errno(ret);
2393 goto out;
2394 }
2395
2396 for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
2397 ret = ocfs2_journal_access(handle, inode,
2398 right_path->p_node[i].bh,
2399 OCFS2_JOURNAL_ACCESS_WRITE);
2400 if (ret) {
2401 mlog_errno(ret);
2402 goto out;
2403 }
2404
2405 ret = ocfs2_journal_access(handle, inode,
2406 left_path->p_node[i].bh,
2407 OCFS2_JOURNAL_ACCESS_WRITE);
2408 if (ret) {
2409 mlog_errno(ret);
2410 goto out;
2411 }
2412 }
2413
2414 if (!right_has_empty) {
2415 /*
2416 * Only do this if we're moving a real
2417 * record. Otherwise, the action is delayed until
2418 * after removal of the right path in which case we
2419 * can do a simple shift to remove the empty extent.
2420 */
2421 ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]);
2422 memset(&right_leaf_el->l_recs[0], 0,
2423 sizeof(struct ocfs2_extent_rec));
2424 }
2425 if (eb->h_next_leaf_blk == 0ULL) {
2426 /*
2427 * Move recs over to get rid of empty extent, decrease
2428 * next_free. This is allowed to remove the last
2429 * extent in our leaf (setting l_next_free_rec to
2430 * zero) - the delete code below won't care.
2431 */
2432 ocfs2_remove_empty_extent(right_leaf_el);
2433 }
2434
2435 ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
2436 if (ret)
2437 mlog_errno(ret);
2438 ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
2439 if (ret)
2440 mlog_errno(ret);
2441
2442 if (del_right_subtree) {
2443 ocfs2_unlink_subtree(inode, handle, left_path, right_path,
2444 subtree_index, dealloc);
2445 ocfs2_update_edge_lengths(inode, handle, left_path);
2446
2447 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
Joel Becker35dc0aa2008-08-20 16:25:06 -07002448 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
Mark Fasheh328d5752007-06-18 10:48:04 -07002449
2450 /*
2451 * Removal of the extent in the left leaf was skipped
2452 * above so we could delete the right path
2453 * 1st.
2454 */
2455 if (right_has_empty)
2456 ocfs2_remove_empty_extent(left_leaf_el);
2457
Tao Mae7d4cb62008-08-18 17:38:44 +08002458 ret = ocfs2_journal_dirty(handle, et_root_bh);
Mark Fasheh328d5752007-06-18 10:48:04 -07002459 if (ret)
2460 mlog_errno(ret);
2461
2462 *deleted = 1;
2463 } else
2464 ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
2465 subtree_index);
2466
2467out:
2468 return ret;
2469}
2470
2471/*
2472 * Given a full path, determine what cpos value would return us a path
2473 * containing the leaf immediately to the right of the current one.
2474 *
2475 * Will return zero if the path passed in is already the rightmost path.
2476 *
2477 * This looks similar, but is subtly different to
2478 * ocfs2_find_cpos_for_left_leaf().
2479 */
2480static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
2481 struct ocfs2_path *path, u32 *cpos)
2482{
2483 int i, j, ret = 0;
2484 u64 blkno;
2485 struct ocfs2_extent_list *el;
2486
2487 *cpos = 0;
2488
2489 if (path->p_tree_depth == 0)
2490 return 0;
2491
2492 blkno = path_leaf_bh(path)->b_blocknr;
2493
2494 /* Start at the tree node just above the leaf and work our way up. */
2495 i = path->p_tree_depth - 1;
2496 while (i >= 0) {
2497 int next_free;
2498
2499 el = path->p_node[i].el;
2500
2501 /*
2502 * Find the extent record just after the one in our
2503 * path.
2504 */
2505 next_free = le16_to_cpu(el->l_next_free_rec);
2506 for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
2507 if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
2508 if (j == (next_free - 1)) {
2509 if (i == 0) {
2510 /*
2511 * We've determined that the
2512 * path specified is already
2513 * the rightmost one - return a
2514 * cpos of zero.
2515 */
2516 goto out;
2517 }
2518 /*
2519 * The rightmost record points to our
2520 * leaf - we need to travel up the
2521 * tree one level.
2522 */
2523 goto next_node;
2524 }
2525
2526 *cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos);
2527 goto out;
2528 }
2529 }
2530
2531 /*
2532 * If we got here, we never found a valid node where
2533 * the tree indicated one should be.
2534 */
2535 ocfs2_error(sb,
2536 "Invalid extent tree at extent block %llu\n",
2537 (unsigned long long)blkno);
2538 ret = -EROFS;
2539 goto out;
2540
2541next_node:
2542 blkno = path->p_node[i].bh->b_blocknr;
2543 i--;
2544 }
2545
2546out:
2547 return ret;
2548}
2549
2550static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode,
2551 handle_t *handle,
2552 struct buffer_head *bh,
2553 struct ocfs2_extent_list *el)
2554{
2555 int ret;
2556
2557 if (!ocfs2_is_empty_extent(&el->l_recs[0]))
2558 return 0;
2559
2560 ret = ocfs2_journal_access(handle, inode, bh,
2561 OCFS2_JOURNAL_ACCESS_WRITE);
2562 if (ret) {
2563 mlog_errno(ret);
2564 goto out;
2565 }
2566
2567 ocfs2_remove_empty_extent(el);
2568
2569 ret = ocfs2_journal_dirty(handle, bh);
2570 if (ret)
2571 mlog_errno(ret);
2572
2573out:
2574 return ret;
2575}
2576
2577static int __ocfs2_rotate_tree_left(struct inode *inode,
2578 handle_t *handle, int orig_credits,
2579 struct ocfs2_path *path,
2580 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08002581 struct ocfs2_path **empty_extent_path,
2582 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002583{
2584 int ret, subtree_root, deleted;
2585 u32 right_cpos;
2586 struct ocfs2_path *left_path = NULL;
2587 struct ocfs2_path *right_path = NULL;
2588
2589 BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])));
2590
2591 *empty_extent_path = NULL;
2592
2593 ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path,
2594 &right_cpos);
2595 if (ret) {
2596 mlog_errno(ret);
2597 goto out;
2598 }
2599
2600 left_path = ocfs2_new_path(path_root_bh(path),
2601 path_root_el(path));
2602 if (!left_path) {
2603 ret = -ENOMEM;
2604 mlog_errno(ret);
2605 goto out;
2606 }
2607
2608 ocfs2_cp_path(left_path, path);
2609
2610 right_path = ocfs2_new_path(path_root_bh(path),
2611 path_root_el(path));
2612 if (!right_path) {
2613 ret = -ENOMEM;
2614 mlog_errno(ret);
2615 goto out;
2616 }
2617
2618 while (right_cpos) {
2619 ret = ocfs2_find_path(inode, right_path, right_cpos);
2620 if (ret) {
2621 mlog_errno(ret);
2622 goto out;
2623 }
2624
2625 subtree_root = ocfs2_find_subtree_root(inode, left_path,
2626 right_path);
2627
2628 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
2629 subtree_root,
2630 (unsigned long long)
2631 right_path->p_node[subtree_root].bh->b_blocknr,
2632 right_path->p_tree_depth);
2633
2634 ret = ocfs2_extend_rotate_transaction(handle, subtree_root,
2635 orig_credits, left_path);
2636 if (ret) {
2637 mlog_errno(ret);
2638 goto out;
2639 }
2640
Mark Fashehe8aed342007-12-03 16:43:01 -08002641 /*
2642 * Caller might still want to make changes to the
2643 * tree root, so re-add it to the journal here.
2644 */
2645 ret = ocfs2_journal_access(handle, inode,
2646 path_root_bh(left_path),
2647 OCFS2_JOURNAL_ACCESS_WRITE);
2648 if (ret) {
2649 mlog_errno(ret);
2650 goto out;
2651 }
2652
Mark Fasheh328d5752007-06-18 10:48:04 -07002653 ret = ocfs2_rotate_subtree_left(inode, handle, left_path,
2654 right_path, subtree_root,
Tao Mae7d4cb62008-08-18 17:38:44 +08002655 dealloc, &deleted, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002656 if (ret == -EAGAIN) {
2657 /*
2658 * The rotation has to temporarily stop due to
2659 * the right subtree having an empty
2660 * extent. Pass it back to the caller for a
2661 * fixup.
2662 */
2663 *empty_extent_path = right_path;
2664 right_path = NULL;
2665 goto out;
2666 }
2667 if (ret) {
2668 mlog_errno(ret);
2669 goto out;
2670 }
2671
2672 /*
2673 * The subtree rotate might have removed records on
2674 * the rightmost edge. If so, then rotation is
2675 * complete.
2676 */
2677 if (deleted)
2678 break;
2679
2680 ocfs2_mv_path(left_path, right_path);
2681
2682 ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
2683 &right_cpos);
2684 if (ret) {
2685 mlog_errno(ret);
2686 goto out;
2687 }
2688 }
2689
2690out:
2691 ocfs2_free_path(right_path);
2692 ocfs2_free_path(left_path);
2693
2694 return ret;
2695}
2696
2697static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08002698 struct ocfs2_path *path,
2699 struct ocfs2_cached_dealloc_ctxt *dealloc,
2700 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002701{
2702 int ret, subtree_index;
2703 u32 cpos;
2704 struct ocfs2_path *left_path = NULL;
Mark Fasheh328d5752007-06-18 10:48:04 -07002705 struct ocfs2_extent_block *eb;
2706 struct ocfs2_extent_list *el;
2707
Mark Fasheh328d5752007-06-18 10:48:04 -07002708
Joel Becker35dc0aa2008-08-20 16:25:06 -07002709 ret = ocfs2_et_sanity_check(inode, et);
Tao Mae7d4cb62008-08-18 17:38:44 +08002710 if (ret)
2711 goto out;
Mark Fasheh328d5752007-06-18 10:48:04 -07002712 /*
2713 * There are two ways we handle this, depending on
2714 * whether path is the only existing one.
2715 */
2716 ret = ocfs2_extend_rotate_transaction(handle, 0,
2717 handle->h_buffer_credits,
2718 path);
2719 if (ret) {
2720 mlog_errno(ret);
2721 goto out;
2722 }
2723
2724 ret = ocfs2_journal_access_path(inode, handle, path);
2725 if (ret) {
2726 mlog_errno(ret);
2727 goto out;
2728 }
2729
2730 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
2731 if (ret) {
2732 mlog_errno(ret);
2733 goto out;
2734 }
2735
2736 if (cpos) {
2737 /*
2738 * We have a path to the left of this one - it needs
2739 * an update too.
2740 */
2741 left_path = ocfs2_new_path(path_root_bh(path),
2742 path_root_el(path));
2743 if (!left_path) {
2744 ret = -ENOMEM;
2745 mlog_errno(ret);
2746 goto out;
2747 }
2748
2749 ret = ocfs2_find_path(inode, left_path, cpos);
2750 if (ret) {
2751 mlog_errno(ret);
2752 goto out;
2753 }
2754
2755 ret = ocfs2_journal_access_path(inode, handle, left_path);
2756 if (ret) {
2757 mlog_errno(ret);
2758 goto out;
2759 }
2760
2761 subtree_index = ocfs2_find_subtree_root(inode, left_path, path);
2762
2763 ocfs2_unlink_subtree(inode, handle, left_path, path,
2764 subtree_index, dealloc);
2765 ocfs2_update_edge_lengths(inode, handle, left_path);
2766
2767 eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
Joel Becker35dc0aa2008-08-20 16:25:06 -07002768 ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
Mark Fasheh328d5752007-06-18 10:48:04 -07002769 } else {
2770 /*
2771 * 'path' is also the leftmost path which
2772 * means it must be the only one. This gets
2773 * handled differently because we want to
2774 * revert the inode back to having extents
2775 * in-line.
2776 */
2777 ocfs2_unlink_path(inode, handle, dealloc, path, 1);
2778
Joel Beckerce1d9ea2008-08-20 16:30:07 -07002779 el = et->et_root_el;
Mark Fasheh328d5752007-06-18 10:48:04 -07002780 el->l_tree_depth = 0;
2781 el->l_next_free_rec = 0;
2782 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
2783
Joel Becker35dc0aa2008-08-20 16:25:06 -07002784 ocfs2_et_set_last_eb_blk(et, 0);
Mark Fasheh328d5752007-06-18 10:48:04 -07002785 }
2786
2787 ocfs2_journal_dirty(handle, path_root_bh(path));
2788
2789out:
2790 ocfs2_free_path(left_path);
2791 return ret;
2792}
2793
2794/*
2795 * Left rotation of btree records.
2796 *
2797 * In many ways, this is (unsurprisingly) the opposite of right
2798 * rotation. We start at some non-rightmost path containing an empty
2799 * extent in the leaf block. The code works its way to the rightmost
2800 * path by rotating records to the left in every subtree.
2801 *
2802 * This is used by any code which reduces the number of extent records
2803 * in a leaf. After removal, an empty record should be placed in the
2804 * leftmost list position.
2805 *
2806 * This won't handle a length update of the rightmost path records if
2807 * the rightmost tree leaf record is removed so the caller is
2808 * responsible for detecting and correcting that.
2809 */
2810static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle,
2811 struct ocfs2_path *path,
Tao Mae7d4cb62008-08-18 17:38:44 +08002812 struct ocfs2_cached_dealloc_ctxt *dealloc,
2813 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07002814{
2815 int ret, orig_credits = handle->h_buffer_credits;
2816 struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
2817 struct ocfs2_extent_block *eb;
2818 struct ocfs2_extent_list *el;
2819
2820 el = path_leaf_el(path);
2821 if (!ocfs2_is_empty_extent(&el->l_recs[0]))
2822 return 0;
2823
2824 if (path->p_tree_depth == 0) {
2825rightmost_no_delete:
2826 /*
Tao Mae7d4cb62008-08-18 17:38:44 +08002827 * Inline extents. This is trivially handled, so do
Mark Fasheh328d5752007-06-18 10:48:04 -07002828 * it up front.
2829 */
2830 ret = ocfs2_rotate_rightmost_leaf_left(inode, handle,
2831 path_leaf_bh(path),
2832 path_leaf_el(path));
2833 if (ret)
2834 mlog_errno(ret);
2835 goto out;
2836 }
2837
2838 /*
2839 * Handle rightmost branch now. There's several cases:
2840 * 1) simple rotation leaving records in there. That's trivial.
2841 * 2) rotation requiring a branch delete - there's no more
2842 * records left. Two cases of this:
2843 * a) There are branches to the left.
2844 * b) This is also the leftmost (the only) branch.
2845 *
2846 * 1) is handled via ocfs2_rotate_rightmost_leaf_left()
2847 * 2a) we need the left branch so that we can update it with the unlink
2848 * 2b) we need to bring the inode back to inline extents.
2849 */
2850
2851 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
2852 el = &eb->h_list;
2853 if (eb->h_next_leaf_blk == 0) {
2854 /*
2855 * This gets a bit tricky if we're going to delete the
2856 * rightmost path. Get the other cases out of the way
2857 * 1st.
2858 */
2859 if (le16_to_cpu(el->l_next_free_rec) > 1)
2860 goto rightmost_no_delete;
2861
2862 if (le16_to_cpu(el->l_next_free_rec) == 0) {
2863 ret = -EIO;
2864 ocfs2_error(inode->i_sb,
2865 "Inode %llu has empty extent block at %llu",
2866 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2867 (unsigned long long)le64_to_cpu(eb->h_blkno));
2868 goto out;
2869 }
2870
2871 /*
2872 * XXX: The caller cannot trust "path" any more after
2873 * this as it will have been deleted. What do we do?
2874 *
2875 * In theory the rotate-for-merge code will never get
2876 * here because it'll always ask for a rotate in a
2877 * nonempty list.
2878 */
2879
2880 ret = ocfs2_remove_rightmost_path(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08002881 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002882 if (ret)
2883 mlog_errno(ret);
2884 goto out;
2885 }
2886
2887 /*
2888 * Now we can loop, remembering the path we get from -EAGAIN
2889 * and restarting from there.
2890 */
2891try_rotate:
2892 ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08002893 dealloc, &restart_path, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002894 if (ret && ret != -EAGAIN) {
2895 mlog_errno(ret);
2896 goto out;
2897 }
2898
2899 while (ret == -EAGAIN) {
2900 tmp_path = restart_path;
2901 restart_path = NULL;
2902
2903 ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits,
2904 tmp_path, dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08002905 &restart_path, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07002906 if (ret && ret != -EAGAIN) {
2907 mlog_errno(ret);
2908 goto out;
2909 }
2910
2911 ocfs2_free_path(tmp_path);
2912 tmp_path = NULL;
2913
2914 if (ret == 0)
2915 goto try_rotate;
2916 }
2917
2918out:
2919 ocfs2_free_path(tmp_path);
2920 ocfs2_free_path(restart_path);
2921 return ret;
2922}
2923
2924static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
2925 int index)
2926{
2927 struct ocfs2_extent_rec *rec = &el->l_recs[index];
2928 unsigned int size;
2929
2930 if (rec->e_leaf_clusters == 0) {
2931 /*
2932 * We consumed all of the merged-from record. An empty
2933 * extent cannot exist anywhere but the 1st array
2934 * position, so move things over if the merged-from
2935 * record doesn't occupy that position.
2936 *
2937 * This creates a new empty extent so the caller
2938 * should be smart enough to have removed any existing
2939 * ones.
2940 */
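		/*
		 * E.g. (hypothetical): with index == 2, records 0 and 1
		 * are shifted into slots 1 and 2 below, and slot 0 is
		 * then cleared to become the leaf's single empty extent.
		 */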
2941 if (index > 0) {
2942 BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
2943 size = index * sizeof(struct ocfs2_extent_rec);
2944 memmove(&el->l_recs[1], &el->l_recs[0], size);
2945 }
2946
2947 /*
2948 * Always memset - the caller doesn't check whether it
2949 * created an empty extent, so there could be junk in
2950 * the other fields.
2951 */
2952 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
2953 }
2954}
2955
Tao Ma677b9752008-01-30 14:21:05 +08002956static int ocfs2_get_right_path(struct inode *inode,
2957 struct ocfs2_path *left_path,
2958 struct ocfs2_path **ret_right_path)
Mark Fasheh328d5752007-06-18 10:48:04 -07002959{
2960 int ret;
Tao Ma677b9752008-01-30 14:21:05 +08002961 u32 right_cpos;
2962 struct ocfs2_path *right_path = NULL;
2963 struct ocfs2_extent_list *left_el;
2964
2965 *ret_right_path = NULL;
2966
2967 /* This function shouldn't be called for non-trees. */
2968 BUG_ON(left_path->p_tree_depth == 0);
2969
2970 left_el = path_leaf_el(left_path);
2971 BUG_ON(left_el->l_next_free_rec != left_el->l_count);
2972
2973 ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
2974 &right_cpos);
2975 if (ret) {
2976 mlog_errno(ret);
2977 goto out;
2978 }
2979
2980 /* This function shouldn't be called for the rightmost leaf. */
2981 BUG_ON(right_cpos == 0);
2982
2983 right_path = ocfs2_new_path(path_root_bh(left_path),
2984 path_root_el(left_path));
2985 if (!right_path) {
2986 ret = -ENOMEM;
2987 mlog_errno(ret);
2988 goto out;
2989 }
2990
2991 ret = ocfs2_find_path(inode, right_path, right_cpos);
2992 if (ret) {
2993 mlog_errno(ret);
2994 goto out;
2995 }
2996
2997 *ret_right_path = right_path;
2998out:
2999 if (ret)
3000 ocfs2_free_path(right_path);
3001 return ret;
3002}
3003
3004/*
3005 * Remove split_rec clusters from the record at index and merge them
3006 * onto the beginning of the record "next" to it.
3007 * For index < l_count - 1, the next means the extent rec at index + 1.
3008 * For index == l_count - 1, the "next" means the 1st extent rec of the
3009 * next extent block.
3010 */
3011static int ocfs2_merge_rec_right(struct inode *inode,
3012 struct ocfs2_path *left_path,
3013 handle_t *handle,
3014 struct ocfs2_extent_rec *split_rec,
3015 int index)
3016{
3017 int ret, next_free, i;
Mark Fasheh328d5752007-06-18 10:48:04 -07003018 unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
3019 struct ocfs2_extent_rec *left_rec;
3020 struct ocfs2_extent_rec *right_rec;
Tao Ma677b9752008-01-30 14:21:05 +08003021 struct ocfs2_extent_list *right_el;
3022 struct ocfs2_path *right_path = NULL;
3023 int subtree_index = 0;
3024 struct ocfs2_extent_list *el = path_leaf_el(left_path);
3025 struct buffer_head *bh = path_leaf_bh(left_path);
3026 struct buffer_head *root_bh = NULL;
Mark Fasheh328d5752007-06-18 10:48:04 -07003027
3028 BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
Mark Fasheh328d5752007-06-18 10:48:04 -07003029 left_rec = &el->l_recs[index];
Tao Ma677b9752008-01-30 14:21:05 +08003030
Al Viro9d8df6a2008-05-21 06:32:11 +01003031 if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
Tao Ma677b9752008-01-30 14:21:05 +08003032 le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
3033 /* we meet with a cross extent block merge. */
3034 ret = ocfs2_get_right_path(inode, left_path, &right_path);
3035 if (ret) {
3036 mlog_errno(ret);
3037 goto out;
3038 }
3039
3040 right_el = path_leaf_el(right_path);
3041 next_free = le16_to_cpu(right_el->l_next_free_rec);
3042 BUG_ON(next_free <= 0);
3043 right_rec = &right_el->l_recs[0];
3044 if (ocfs2_is_empty_extent(right_rec)) {
Al Viro9d8df6a2008-05-21 06:32:11 +01003045 BUG_ON(next_free <= 1);
Tao Ma677b9752008-01-30 14:21:05 +08003046 right_rec = &right_el->l_recs[1];
3047 }
3048
3049 BUG_ON(le32_to_cpu(left_rec->e_cpos) +
3050 le16_to_cpu(left_rec->e_leaf_clusters) !=
3051 le32_to_cpu(right_rec->e_cpos));
3052
3053 subtree_index = ocfs2_find_subtree_root(inode,
3054 left_path, right_path);
3055
3056 ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
3057 handle->h_buffer_credits,
3058 right_path);
3059 if (ret) {
3060 mlog_errno(ret);
3061 goto out;
3062 }
3063
3064 root_bh = left_path->p_node[subtree_index].bh;
3065 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
3066
3067 ret = ocfs2_journal_access(handle, inode, root_bh,
3068 OCFS2_JOURNAL_ACCESS_WRITE);
3069 if (ret) {
3070 mlog_errno(ret);
3071 goto out;
3072 }
3073
3074 for (i = subtree_index + 1;
3075 i < path_num_items(right_path); i++) {
3076 ret = ocfs2_journal_access(handle, inode,
3077 right_path->p_node[i].bh,
3078 OCFS2_JOURNAL_ACCESS_WRITE);
3079 if (ret) {
3080 mlog_errno(ret);
3081 goto out;
3082 }
3083
3084 ret = ocfs2_journal_access(handle, inode,
3085 left_path->p_node[i].bh,
3086 OCFS2_JOURNAL_ACCESS_WRITE);
3087 if (ret) {
3088 mlog_errno(ret);
3089 goto out;
3090 }
3091 }
3092
3093 } else {
3094 BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1);
3095 right_rec = &el->l_recs[index + 1];
3096 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003097
3098 ret = ocfs2_journal_access(handle, inode, bh,
3099 OCFS2_JOURNAL_ACCESS_WRITE);
3100 if (ret) {
3101 mlog_errno(ret);
3102 goto out;
3103 }
3104
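	/*
	 * Trim split_clusters from the tail of the record at 'index' and
	 * grow the "next" record downward by the same amount, so that its
	 * cpos and starting block now cover the clusters being moved.
	 */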
3105 le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters);
3106
3107 le32_add_cpu(&right_rec->e_cpos, -split_clusters);
3108 le64_add_cpu(&right_rec->e_blkno,
3109 -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
3110 le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);
3111
3112 ocfs2_cleanup_merge(el, index);
3113
3114 ret = ocfs2_journal_dirty(handle, bh);
3115 if (ret)
3116 mlog_errno(ret);
3117
Tao Ma677b9752008-01-30 14:21:05 +08003118 if (right_path) {
3119 ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
3120 if (ret)
3121 mlog_errno(ret);
3122
3123 ocfs2_complete_edge_insert(inode, handle, left_path,
3124 right_path, subtree_index);
3125 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003126out:
Tao Ma677b9752008-01-30 14:21:05 +08003127 if (right_path)
3128 ocfs2_free_path(right_path);
3129 return ret;
3130}
3131
3132static int ocfs2_get_left_path(struct inode *inode,
3133 struct ocfs2_path *right_path,
3134 struct ocfs2_path **ret_left_path)
3135{
3136 int ret;
3137 u32 left_cpos;
3138 struct ocfs2_path *left_path = NULL;
3139
3140 *ret_left_path = NULL;
3141
3142 /* This function shouldn't be called for non-trees. */
3143 BUG_ON(right_path->p_tree_depth == 0);
3144
3145 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
3146 right_path, &left_cpos);
3147 if (ret) {
3148 mlog_errno(ret);
3149 goto out;
3150 }
3151
3152 /* This function shouldn't be called for the leftmost leaf. */
3153 BUG_ON(left_cpos == 0);
3154
3155 left_path = ocfs2_new_path(path_root_bh(right_path),
3156 path_root_el(right_path));
3157 if (!left_path) {
3158 ret = -ENOMEM;
3159 mlog_errno(ret);
3160 goto out;
3161 }
3162
3163 ret = ocfs2_find_path(inode, left_path, left_cpos);
3164 if (ret) {
3165 mlog_errno(ret);
3166 goto out;
3167 }
3168
3169 *ret_left_path = left_path;
3170out:
3171 if (ret)
3172 ocfs2_free_path(left_path);
Mark Fasheh328d5752007-06-18 10:48:04 -07003173 return ret;
3174}
3175
3176/*
3177 * Remove split_rec clusters from the record at index and merge them
Tao Ma677b9752008-01-30 14:21:05 +08003178 * onto the tail of the record "before" it.
3179 * For index > 0, the "before" means the extent rec at index - 1.
3180 *
3181 * For index == 0, the "before" means the last record of the previous
3182 * extent block. There is also a case where we may need to remove
3183 * the rightmost leaf extent block in the right_path and change the
3184 * right path to point at the new rightmost path.
Mark Fasheh328d5752007-06-18 10:48:04 -07003185 */
Tao Ma677b9752008-01-30 14:21:05 +08003186static int ocfs2_merge_rec_left(struct inode *inode,
3187 struct ocfs2_path *right_path,
Mark Fasheh328d5752007-06-18 10:48:04 -07003188 handle_t *handle,
3189 struct ocfs2_extent_rec *split_rec,
Tao Ma677b9752008-01-30 14:21:05 +08003190 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08003191 struct ocfs2_extent_tree *et,
Tao Ma677b9752008-01-30 14:21:05 +08003192 int index)
Mark Fasheh328d5752007-06-18 10:48:04 -07003193{
Tao Ma677b9752008-01-30 14:21:05 +08003194 int ret, i, subtree_index = 0, has_empty_extent = 0;
Mark Fasheh328d5752007-06-18 10:48:04 -07003195 unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
3196 struct ocfs2_extent_rec *left_rec;
3197 struct ocfs2_extent_rec *right_rec;
Tao Ma677b9752008-01-30 14:21:05 +08003198 struct ocfs2_extent_list *el = path_leaf_el(right_path);
3199 struct buffer_head *bh = path_leaf_bh(right_path);
3200 struct buffer_head *root_bh = NULL;
3201 struct ocfs2_path *left_path = NULL;
3202 struct ocfs2_extent_list *left_el;
Mark Fasheh328d5752007-06-18 10:48:04 -07003203
Tao Ma677b9752008-01-30 14:21:05 +08003204 BUG_ON(index < 0);
Mark Fasheh328d5752007-06-18 10:48:04 -07003205
Mark Fasheh328d5752007-06-18 10:48:04 -07003206 right_rec = &el->l_recs[index];
Tao Ma677b9752008-01-30 14:21:05 +08003207 if (index == 0) {
3208 /* we meet with a cross extent block merge. */
3209 ret = ocfs2_get_left_path(inode, right_path, &left_path);
3210 if (ret) {
3211 mlog_errno(ret);
3212 goto out;
3213 }
3214
3215 left_el = path_leaf_el(left_path);
3216 BUG_ON(le16_to_cpu(left_el->l_next_free_rec) !=
3217 le16_to_cpu(left_el->l_count));
3218
3219 left_rec = &left_el->l_recs[
3220 le16_to_cpu(left_el->l_next_free_rec) - 1];
3221 BUG_ON(le32_to_cpu(left_rec->e_cpos) +
3222 le16_to_cpu(left_rec->e_leaf_clusters) !=
3223 le32_to_cpu(split_rec->e_cpos));
3224
3225 subtree_index = ocfs2_find_subtree_root(inode,
3226 left_path, right_path);
3227
3228 ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
3229 handle->h_buffer_credits,
3230 left_path);
3231 if (ret) {
3232 mlog_errno(ret);
3233 goto out;
3234 }
3235
3236 root_bh = left_path->p_node[subtree_index].bh;
3237 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
3238
3239 ret = ocfs2_journal_access(handle, inode, root_bh,
3240 OCFS2_JOURNAL_ACCESS_WRITE);
3241 if (ret) {
3242 mlog_errno(ret);
3243 goto out;
3244 }
3245
3246 for (i = subtree_index + 1;
3247 i < path_num_items(right_path); i++) {
3248 ret = ocfs2_journal_access(handle, inode,
3249 right_path->p_node[i].bh,
3250 OCFS2_JOURNAL_ACCESS_WRITE);
3251 if (ret) {
3252 mlog_errno(ret);
3253 goto out;
3254 }
3255
3256 ret = ocfs2_journal_access(handle, inode,
3257 left_path->p_node[i].bh,
3258 OCFS2_JOURNAL_ACCESS_WRITE);
3259 if (ret) {
3260 mlog_errno(ret);
3261 goto out;
3262 }
3263 }
3264 } else {
3265 left_rec = &el->l_recs[index - 1];
3266 if (ocfs2_is_empty_extent(&el->l_recs[0]))
3267 has_empty_extent = 1;
3268 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003269
3270 ret = ocfs2_journal_access(handle, inode, bh,
3271 OCFS2_JOURNAL_ACCESS_WRITE);
3272 if (ret) {
3273 mlog_errno(ret);
3274 goto out;
3275 }
3276
3277 if (has_empty_extent && index == 1) {
3278 /*
3279 * The easy case - we can just plop the record right in.
3280 */
3281 *left_rec = *split_rec;
3282
3283 has_empty_extent = 0;
Tao Ma677b9752008-01-30 14:21:05 +08003284 } else
Mark Fasheh328d5752007-06-18 10:48:04 -07003285 le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
Mark Fasheh328d5752007-06-18 10:48:04 -07003286
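	/*
	 * The record at 'index' gives up split_clusters from its head:
	 * advance its cpos and starting block and shrink its length.
	 * Those clusters were merged onto the record before it above.
	 */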
3287 le32_add_cpu(&right_rec->e_cpos, split_clusters);
3288 le64_add_cpu(&right_rec->e_blkno,
3289 ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
3290 le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);
3291
3292 ocfs2_cleanup_merge(el, index);
3293
3294 ret = ocfs2_journal_dirty(handle, bh);
3295 if (ret)
3296 mlog_errno(ret);
3297
Tao Ma677b9752008-01-30 14:21:05 +08003298 if (left_path) {
3299 ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
3300 if (ret)
3301 mlog_errno(ret);
3302
3303 /*
3304 * In the situation that the right_rec is empty and the extent
3305 * block is empty also, ocfs2_complete_edge_insert can't handle
3306 * it and we need to delete the right extent block.
3307 */
3308 if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
3309 le16_to_cpu(el->l_next_free_rec) == 1) {
3310
3311 ret = ocfs2_remove_rightmost_path(inode, handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08003312 right_path,
3313 dealloc, et);
Tao Ma677b9752008-01-30 14:21:05 +08003314 if (ret) {
3315 mlog_errno(ret);
3316 goto out;
3317 }
3318
3319 /* Now the rightmost extent block has been deleted.
3320 * So we use the new rightmost path.
3321 */
3322 ocfs2_mv_path(right_path, left_path);
3323 left_path = NULL;
3324 } else
3325 ocfs2_complete_edge_insert(inode, handle, left_path,
3326 right_path, subtree_index);
3327 }
Mark Fasheh328d5752007-06-18 10:48:04 -07003328out:
Tao Ma677b9752008-01-30 14:21:05 +08003329 if (left_path)
3330 ocfs2_free_path(left_path);
Mark Fasheh328d5752007-06-18 10:48:04 -07003331 return ret;
3332}
3333
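/*
 * Fold split_rec into one or both of its neighbours, as dictated by the
 * contiguity type the caller detected (ctxt).  Any empty extents created
 * along the way are rotated out of the leaf.
 */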
3334static int ocfs2_try_to_merge_extent(struct inode *inode,
3335 handle_t *handle,
Tao Ma677b9752008-01-30 14:21:05 +08003336 struct ocfs2_path *path,
Mark Fasheh328d5752007-06-18 10:48:04 -07003337 int split_index,
3338 struct ocfs2_extent_rec *split_rec,
3339 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08003340 struct ocfs2_merge_ctxt *ctxt,
3341 struct ocfs2_extent_tree *et)
Mark Fasheh328d5752007-06-18 10:48:04 -07003342
3343{
Tao Mao518d7262007-08-28 17:25:35 -07003344 int ret = 0;
Tao Ma677b9752008-01-30 14:21:05 +08003345 struct ocfs2_extent_list *el = path_leaf_el(path);
Mark Fasheh328d5752007-06-18 10:48:04 -07003346 struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
3347
3348 BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
3349
Tao Mao518d7262007-08-28 17:25:35 -07003350 if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
3351 /*
3352 * The merge code will need to create an empty
3353 * extent to take the place of the newly
3354 * emptied slot. Remove any pre-existing empty
3355 * extents - having more than one in a leaf is
3356 * illegal.
3357 */
Tao Ma677b9752008-01-30 14:21:05 +08003358 ret = ocfs2_rotate_tree_left(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08003359 dealloc, et);
Tao Mao518d7262007-08-28 17:25:35 -07003360 if (ret) {
3361 mlog_errno(ret);
3362 goto out;
Mark Fasheh328d5752007-06-18 10:48:04 -07003363 }
Tao Mao518d7262007-08-28 17:25:35 -07003364 split_index--;
3365 rec = &el->l_recs[split_index];
Mark Fasheh328d5752007-06-18 10:48:04 -07003366 }
3367
3368 if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
3369 /*
3370 * Left-right contig implies this.
3371 */
3372 BUG_ON(!ctxt->c_split_covers_rec);
Mark Fasheh328d5752007-06-18 10:48:04 -07003373
3374 /*
3375 * Since the leftright insert always covers the entire
3376 * extent, this call will delete the insert record
3377 * entirely, resulting in an empty extent record added to
3378 * the extent block.
3379 *
3380 * Since the adding of an empty extent shifts
3381 * everything back to the right, there's no need to
3382 * update split_index here.
Tao Ma677b9752008-01-30 14:21:05 +08003383 *
3384 * When the split_index is zero, we need to merge it into the
3385 * previous extent block. It is more efficient and easier
3386 * if we do merge_right first and merge_left later.
Mark Fasheh328d5752007-06-18 10:48:04 -07003387 */
Tao Ma677b9752008-01-30 14:21:05 +08003388 ret = ocfs2_merge_rec_right(inode, path,
3389 handle, split_rec,
3390 split_index);
Mark Fasheh328d5752007-06-18 10:48:04 -07003391 if (ret) {
3392 mlog_errno(ret);
3393 goto out;
3394 }
3395
3396 /*
3397 * We can only get this from logic error above.
3398 */
3399 BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
3400
Tao Ma677b9752008-01-30 14:21:05 +08003401 /* The merge left us with an empty extent, remove it. */
Tao Mae7d4cb62008-08-18 17:38:44 +08003402 ret = ocfs2_rotate_tree_left(inode, handle, path,
3403 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07003404 if (ret) {
3405 mlog_errno(ret);
3406 goto out;
3407 }
Tao Ma677b9752008-01-30 14:21:05 +08003408
Mark Fasheh328d5752007-06-18 10:48:04 -07003409 rec = &el->l_recs[split_index];
3410
3411 /*
3412 * Note that we don't pass split_rec here on purpose -
Tao Ma677b9752008-01-30 14:21:05 +08003413 * we've merged it into the rec already.
Mark Fasheh328d5752007-06-18 10:48:04 -07003414 */
Tao Ma677b9752008-01-30 14:21:05 +08003415 ret = ocfs2_merge_rec_left(inode, path,
3416 handle, rec,
Tao Mae7d4cb62008-08-18 17:38:44 +08003417 dealloc, et,
Tao Ma677b9752008-01-30 14:21:05 +08003418 split_index);
3419
Mark Fasheh328d5752007-06-18 10:48:04 -07003420 if (ret) {
3421 mlog_errno(ret);
3422 goto out;
3423 }
3424
Tao Ma677b9752008-01-30 14:21:05 +08003425 ret = ocfs2_rotate_tree_left(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08003426 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07003427 /*
3428 * Error from this last rotate is not critical, so
3429 * print but don't bubble it up.
3430 */
3431 if (ret)
3432 mlog_errno(ret);
3433 ret = 0;
3434 } else {
3435 /*
3436 * Merge a record to the left or right.
3437 *
3438 * 'contig_type' is relative to the existing record,
3439 * so for example, if we're "right contig", it's to
3440 * the record on the left (hence the left merge).
3441 */
3442 if (ctxt->c_contig_type == CONTIG_RIGHT) {
3443 ret = ocfs2_merge_rec_left(inode,
Tao Ma677b9752008-01-30 14:21:05 +08003444 path,
3445 handle, split_rec,
Tao Mae7d4cb62008-08-18 17:38:44 +08003446 dealloc, et,
Mark Fasheh328d5752007-06-18 10:48:04 -07003447 split_index);
3448 if (ret) {
3449 mlog_errno(ret);
3450 goto out;
3451 }
3452 } else {
3453 ret = ocfs2_merge_rec_right(inode,
Tao Ma677b9752008-01-30 14:21:05 +08003454 path,
3455 handle, split_rec,
Mark Fasheh328d5752007-06-18 10:48:04 -07003456 split_index);
3457 if (ret) {
3458 mlog_errno(ret);
3459 goto out;
3460 }
3461 }
3462
3463 if (ctxt->c_split_covers_rec) {
3464 /*
3465 * The merge may have left an empty extent in
3466 * our leaf. Try to rotate it away.
3467 */
Tao Ma677b9752008-01-30 14:21:05 +08003468 ret = ocfs2_rotate_tree_left(inode, handle, path,
Tao Mae7d4cb62008-08-18 17:38:44 +08003469 dealloc, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07003470 if (ret)
3471 mlog_errno(ret);
3472 ret = 0;
3473 }
3474 }
3475
3476out:
3477 return ret;
3478}
3479
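/*
 * Shrink 'rec' by the clusters described in 'split_rec'. A SPLIT_LEFT
 * trims the front of the record (e_cpos and e_blkno advance), while a
 * SPLIT_RIGHT trims the tail (only the leaf cluster count shrinks).
 */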
3480static void ocfs2_subtract_from_rec(struct super_block *sb,
3481 enum ocfs2_split_type split,
3482 struct ocfs2_extent_rec *rec,
3483 struct ocfs2_extent_rec *split_rec)
3484{
3485 u64 len_blocks;
3486
3487 len_blocks = ocfs2_clusters_to_blocks(sb,
3488 le16_to_cpu(split_rec->e_leaf_clusters));
3489
3490 if (split == SPLIT_LEFT) {
3491 /*
3492 * Region is on the left edge of the existing
3493 * record.
3494 */
3495 le32_add_cpu(&rec->e_cpos,
3496 le16_to_cpu(split_rec->e_leaf_clusters));
3497 le64_add_cpu(&rec->e_blkno, len_blocks);
3498 le16_add_cpu(&rec->e_leaf_clusters,
3499 -le16_to_cpu(split_rec->e_leaf_clusters));
3500 } else {
3501 /*
3502 * Region is on the right edge of the existing
3503 * record.
3504 */
3505 le16_add_cpu(&rec->e_leaf_clusters,
3506 -le16_to_cpu(split_rec->e_leaf_clusters));
3507 }
3508}
3509
Mark Fashehdcd05382007-01-16 11:32:23 -08003510/*
3511 * Do the final bits of extent record insertion at the target leaf
3512 * list. If this leaf is part of an allocation tree, it is assumed
3513 * that the tree above has been prepared.
3514 */
3515static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
3516 struct ocfs2_extent_list *el,
3517 struct ocfs2_insert_type *insert,
3518 struct inode *inode)
3519{
3520 int i = insert->ins_contig_index;
3521 unsigned int range;
3522 struct ocfs2_extent_rec *rec;
3523
Mark Fashehe48edee2007-03-07 16:46:57 -08003524 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
Mark Fashehdcd05382007-01-16 11:32:23 -08003525
Mark Fasheh328d5752007-06-18 10:48:04 -07003526 if (insert->ins_split != SPLIT_NONE) {
3527 i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos));
3528 BUG_ON(i == -1);
3529 rec = &el->l_recs[i];
3530 ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec,
3531 insert_rec);
3532 goto rotate;
3533 }
3534
Mark Fashehdcd05382007-01-16 11:32:23 -08003535 /*
3536 * Contiguous insert - either left or right.
3537 */
3538 if (insert->ins_contig != CONTIG_NONE) {
3539 rec = &el->l_recs[i];
3540 if (insert->ins_contig == CONTIG_LEFT) {
3541 rec->e_blkno = insert_rec->e_blkno;
3542 rec->e_cpos = insert_rec->e_cpos;
3543 }
Mark Fashehe48edee2007-03-07 16:46:57 -08003544 le16_add_cpu(&rec->e_leaf_clusters,
3545 le16_to_cpu(insert_rec->e_leaf_clusters));
Mark Fashehdcd05382007-01-16 11:32:23 -08003546 return;
3547 }
3548
3549 /*
3550 * Handle insert into an empty leaf.
3551 */
3552 if (le16_to_cpu(el->l_next_free_rec) == 0 ||
3553 ((le16_to_cpu(el->l_next_free_rec) == 1) &&
3554 ocfs2_is_empty_extent(&el->l_recs[0]))) {
3555 el->l_recs[0] = *insert_rec;
3556 el->l_next_free_rec = cpu_to_le16(1);
3557 return;
3558 }
3559
3560 /*
3561 * Appending insert.
3562 */
3563 if (insert->ins_appending == APPEND_TAIL) {
3564 i = le16_to_cpu(el->l_next_free_rec) - 1;
3565 rec = &el->l_recs[i];
Mark Fashehe48edee2007-03-07 16:46:57 -08003566 range = le32_to_cpu(rec->e_cpos)
3567 + le16_to_cpu(rec->e_leaf_clusters);
Mark Fashehdcd05382007-01-16 11:32:23 -08003568 BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
3569
3570 mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
3571 le16_to_cpu(el->l_count),
3572 "inode %lu, depth %u, count %u, next free %u, "
3573 "rec.cpos %u, rec.clusters %u, "
3574 "insert.cpos %u, insert.clusters %u\n",
3575 inode->i_ino,
3576 le16_to_cpu(el->l_tree_depth),
3577 le16_to_cpu(el->l_count),
3578 le16_to_cpu(el->l_next_free_rec),
3579 le32_to_cpu(el->l_recs[i].e_cpos),
Mark Fashehe48edee2007-03-07 16:46:57 -08003580 le16_to_cpu(el->l_recs[i].e_leaf_clusters),
Mark Fashehdcd05382007-01-16 11:32:23 -08003581 le32_to_cpu(insert_rec->e_cpos),
Mark Fashehe48edee2007-03-07 16:46:57 -08003582 le16_to_cpu(insert_rec->e_leaf_clusters));
Mark Fashehdcd05382007-01-16 11:32:23 -08003583 i++;
3584 el->l_recs[i] = *insert_rec;
3585 le16_add_cpu(&el->l_next_free_rec, 1);
3586 return;
3587 }
3588
Mark Fasheh328d5752007-06-18 10:48:04 -07003589rotate:
Mark Fashehdcd05382007-01-16 11:32:23 -08003590 /*
3591 * Ok, we have to rotate.
3592 *
3593 * At this point, it is safe to assume that inserting into an
3594 * empty leaf and appending to a leaf have both been handled
3595 * above.
3596 *
3597 * This leaf needs to have space, either by the empty 1st
3598 * extent record, or by virtue of an l_next_free_rec < l_count.
3599 */
3600 ocfs2_rotate_leaf(el, insert_rec);
3601}
3602
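/*
 * Stretch the rightmost record of every interior node in 'path' so
 * that it covers the clusters described by insert_rec. The leaf block
 * itself is left untouched - callers handle that separately.
 */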
Mark Fasheh328d5752007-06-18 10:48:04 -07003603static void ocfs2_adjust_rightmost_records(struct inode *inode,
3604 handle_t *handle,
3605 struct ocfs2_path *path,
3606 struct ocfs2_extent_rec *insert_rec)
3607{
3608 int ret, i, next_free;
3609 struct buffer_head *bh;
3610 struct ocfs2_extent_list *el;
3611 struct ocfs2_extent_rec *rec;
3612
3613 /*
3614 * Update everything except the leaf block.
3615 */
3616 for (i = 0; i < path->p_tree_depth; i++) {
3617 bh = path->p_node[i].bh;
3618 el = path->p_node[i].el;
3619
3620 next_free = le16_to_cpu(el->l_next_free_rec);
3621 if (next_free == 0) {
3622 ocfs2_error(inode->i_sb,
3623 "Dinode %llu has a bad extent list",
3624 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3625 ret = -EIO;
3626 return;
3627 }
3628
3629 rec = &el->l_recs[next_free - 1];
3630
3631 rec->e_int_clusters = insert_rec->e_cpos;
3632 le32_add_cpu(&rec->e_int_clusters,
3633 le16_to_cpu(insert_rec->e_leaf_clusters));
3634 le32_add_cpu(&rec->e_int_clusters,
3635 -le32_to_cpu(rec->e_cpos));
3636
3637 ret = ocfs2_journal_dirty(handle, bh);
3638 if (ret)
3639 mlog_errno(ret);
3640
3641 }
3642}
3643
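/*
 * Prepare an appending insert. If the append lands at the leftmost
 * edge of this leaf, look up the neighboring left path and pass it
 * back so its edge records can be updated after the insert. The right
 * path is journaled and its interior records are stretched to cover
 * insert_rec.
 */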
Mark Fashehdcd05382007-01-16 11:32:23 -08003644static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
3645 struct ocfs2_extent_rec *insert_rec,
3646 struct ocfs2_path *right_path,
3647 struct ocfs2_path **ret_left_path)
3648{
Mark Fasheh328d5752007-06-18 10:48:04 -07003649 int ret, next_free;
Mark Fashehdcd05382007-01-16 11:32:23 -08003650 struct ocfs2_extent_list *el;
3651 struct ocfs2_path *left_path = NULL;
3652
3653 *ret_left_path = NULL;
3654
3655 /*
Mark Fashehe48edee2007-03-07 16:46:57 -08003656 * This shouldn't happen for non-trees. The extent rec cluster
3657 * count manipulation below only works for interior nodes.
3658 */
3659 BUG_ON(right_path->p_tree_depth == 0);
3660
3661 /*
Mark Fashehdcd05382007-01-16 11:32:23 -08003662 * If our appending insert is at the leftmost edge of a leaf,
3663 * then we might need to update the rightmost records of the
3664 * neighboring path.
3665 */
3666 el = path_leaf_el(right_path);
3667 next_free = le16_to_cpu(el->l_next_free_rec);
3668 if (next_free == 0 ||
3669 (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
3670 u32 left_cpos;
3671
3672 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
3673 &left_cpos);
3674 if (ret) {
3675 mlog_errno(ret);
3676 goto out;
3677 }
3678
3679 mlog(0, "Append may need a left path update. cpos: %u, "
3680 "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos),
3681 left_cpos);
3682
3683 /*
3684 * No need to worry if the append is already in the
3685 * leftmost leaf.
3686 */
3687 if (left_cpos) {
3688 left_path = ocfs2_new_path(path_root_bh(right_path),
3689 path_root_el(right_path));
3690 if (!left_path) {
3691 ret = -ENOMEM;
3692 mlog_errno(ret);
3693 goto out;
3694 }
3695
3696 ret = ocfs2_find_path(inode, left_path, left_cpos);
3697 if (ret) {
3698 mlog_errno(ret);
3699 goto out;
3700 }
3701
3702 /*
3703 * ocfs2_insert_path() will pass the left_path to the
3704 * journal for us.
3705 */
3706 }
3707 }
3708
3709 ret = ocfs2_journal_access_path(inode, handle, right_path);
3710 if (ret) {
3711 mlog_errno(ret);
3712 goto out;
3713 }
3714
Mark Fasheh328d5752007-06-18 10:48:04 -07003715 ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec);
Mark Fashehdcd05382007-01-16 11:32:23 -08003716
3717 *ret_left_path = left_path;
3718 ret = 0;
3719out:
3720 if (ret != 0)
3721 ocfs2_free_path(left_path);
3722
3723 return ret;
3724}
3725
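/*
 * Perform a SPLIT_LEFT or SPLIT_RIGHT insert in the leaf. Depending on
 * where the record being split ended up after rotation, the existing
 * record is shrunk with ocfs2_subtract_from_rec() and split_rec is
 * rotated into the appropriate leaf (left or right path).
 */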
Mark Fasheh328d5752007-06-18 10:48:04 -07003726static void ocfs2_split_record(struct inode *inode,
3727 struct ocfs2_path *left_path,
3728 struct ocfs2_path *right_path,
3729 struct ocfs2_extent_rec *split_rec,
3730 enum ocfs2_split_type split)
3731{
3732 int index;
3733 u32 cpos = le32_to_cpu(split_rec->e_cpos);
3734 struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
3735 struct ocfs2_extent_rec *rec, *tmprec;
3736
3737	right_el = path_leaf_el(right_path);
3738 if (left_path)
3739 left_el = path_leaf_el(left_path);
3740
3741 el = right_el;
3742 insert_el = right_el;
3743 index = ocfs2_search_extent_list(el, cpos);
3744 if (index != -1) {
3745 if (index == 0 && left_path) {
3746 BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
3747
3748 /*
3749 * This typically means that the record
3750 * started in the left path but moved to the
3751 * right as a result of rotation. We either
3752 * move the existing record to the left, or we
3753 * do the later insert there.
3754 *
3755 * In this case, the left path should always
3756 * exist as the rotate code will have passed
3757 * it back for a post-insert update.
3758 */
3759
3760 if (split == SPLIT_LEFT) {
3761 /*
3762 * It's a left split. Since we know
3763 * that the rotate code gave us an
3764 * empty extent in the left path, we
3765 * can just do the insert there.
3766 */
3767 insert_el = left_el;
3768 } else {
3769 /*
3770 * Right split - we have to move the
3771 * existing record over to the left
3772 * leaf. The insert will be into the
3773 * newly created empty extent in the
3774 * right leaf.
3775 */
3776 tmprec = &right_el->l_recs[index];
3777 ocfs2_rotate_leaf(left_el, tmprec);
3778 el = left_el;
3779
3780 memset(tmprec, 0, sizeof(*tmprec));
3781 index = ocfs2_search_extent_list(left_el, cpos);
3782 BUG_ON(index == -1);
3783 }
3784 }
3785 } else {
3786 BUG_ON(!left_path);
3787 BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0]));
3788 /*
3789 * Left path is easy - we can just allow the insert to
3790 * happen.
3791 */
3792 el = left_el;
3793 insert_el = left_el;
3794 index = ocfs2_search_extent_list(el, cpos);
3795 BUG_ON(index == -1);
3796 }
3797
3798 rec = &el->l_recs[index];
3799 ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec);
3800 ocfs2_rotate_leaf(insert_el, split_rec);
3801}
3802
Mark Fashehdcd05382007-01-16 11:32:23 -08003803/*
Tao Mae7d4cb62008-08-18 17:38:44 +08003804 * This function only does inserts on an allocation b-tree. For tree
3805 * depth = 0, ocfs2_insert_at_leaf() is called directly.
Mark Fashehdcd05382007-01-16 11:32:23 -08003806 *
3807 * right_path is the path we want to do the actual insert
3808 * in. left_path should only be passed in if we need to update that
3809 * portion of the tree after an edge insert.
3810 */
3811static int ocfs2_insert_path(struct inode *inode,
3812 handle_t *handle,
3813 struct ocfs2_path *left_path,
3814 struct ocfs2_path *right_path,
3815 struct ocfs2_extent_rec *insert_rec,
3816 struct ocfs2_insert_type *insert)
3817{
3818 int ret, subtree_index;
3819 struct buffer_head *leaf_bh = path_leaf_bh(right_path);
Mark Fashehdcd05382007-01-16 11:32:23 -08003820
Mark Fashehdcd05382007-01-16 11:32:23 -08003821 if (left_path) {
3822 int credits = handle->h_buffer_credits;
3823
3824 /*
3825 * There's a chance that left_path got passed back to
3826 * us without being accounted for in the
3827 * journal. Extend our transaction here to be sure we
3828 * can change those blocks.
3829 */
3830 credits += left_path->p_tree_depth;
3831
3832 ret = ocfs2_extend_trans(handle, credits);
3833 if (ret < 0) {
3834 mlog_errno(ret);
3835 goto out;
3836 }
3837
3838 ret = ocfs2_journal_access_path(inode, handle, left_path);
3839 if (ret < 0) {
3840 mlog_errno(ret);
3841 goto out;
3842 }
3843 }
3844
Mark Fashehe8aed342007-12-03 16:43:01 -08003845 /*
3846 * Pass both paths to the journal. The majority of inserts
3847 * will be touching all components anyway.
3848 */
3849 ret = ocfs2_journal_access_path(inode, handle, right_path);
3850 if (ret < 0) {
3851 mlog_errno(ret);
3852 goto out;
3853 }
3854
Mark Fasheh328d5752007-06-18 10:48:04 -07003855 if (insert->ins_split != SPLIT_NONE) {
3856 /*
3857 * We could call ocfs2_insert_at_leaf() for some types
Joe Perchesc78bad12008-02-03 17:33:42 +02003858 * of splits, but it's easier to just let one separate
Mark Fasheh328d5752007-06-18 10:48:04 -07003859 * function sort it all out.
3860 */
3861 ocfs2_split_record(inode, left_path, right_path,
3862 insert_rec, insert->ins_split);
Mark Fashehe8aed342007-12-03 16:43:01 -08003863
3864 /*
3865 * Split might have modified either leaf and we don't
3866 * have a guarantee that the later edge insert will
3867 * dirty this for us.
3868 */
3869 if (left_path)
3870 ret = ocfs2_journal_dirty(handle,
3871 path_leaf_bh(left_path));
3872 if (ret)
3873 mlog_errno(ret);
Mark Fasheh328d5752007-06-18 10:48:04 -07003874 } else
3875 ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path),
3876 insert, inode);
Mark Fashehdcd05382007-01-16 11:32:23 -08003877
Mark Fashehdcd05382007-01-16 11:32:23 -08003878 ret = ocfs2_journal_dirty(handle, leaf_bh);
3879 if (ret)
3880 mlog_errno(ret);
3881
3882 if (left_path) {
3883 /*
3884 * The rotate code has indicated that we need to fix
3885 * up portions of the tree after the insert.
3886 *
3887 * XXX: Should we extend the transaction here?
3888 */
3889 subtree_index = ocfs2_find_subtree_root(inode, left_path,
3890 right_path);
3891 ocfs2_complete_edge_insert(inode, handle, left_path,
3892 right_path, subtree_index);
3893 }
3894
3895 ret = 0;
3896out:
3897 return ret;
3898}
3899
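/*
 * Top-level insert of a single extent record into the tree rooted at
 * 'et'. Tree depth zero is handled directly by ocfs2_insert_at_leaf();
 * otherwise the target path is found, any rotation or append
 * preparation is done, and the root's cluster count is updated.
 */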
3900static int ocfs2_do_insert_extent(struct inode *inode,
3901 handle_t *handle,
Tao Mae7d4cb62008-08-18 17:38:44 +08003902 struct ocfs2_extent_tree *et,
Mark Fashehdcd05382007-01-16 11:32:23 -08003903 struct ocfs2_extent_rec *insert_rec,
3904 struct ocfs2_insert_type *type)
3905{
3906 int ret, rotate = 0;
3907 u32 cpos;
3908 struct ocfs2_path *right_path = NULL;
3909 struct ocfs2_path *left_path = NULL;
Mark Fashehdcd05382007-01-16 11:32:23 -08003910 struct ocfs2_extent_list *el;
3911
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003912 el = et->et_root_el;
Mark Fashehdcd05382007-01-16 11:32:23 -08003913
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003914 ret = ocfs2_journal_access(handle, inode, et->et_root_bh,
Mark Fashehdcd05382007-01-16 11:32:23 -08003915 OCFS2_JOURNAL_ACCESS_WRITE);
3916 if (ret) {
3917 mlog_errno(ret);
3918 goto out;
3919 }
3920
3921 if (le16_to_cpu(el->l_tree_depth) == 0) {
3922 ocfs2_insert_at_leaf(insert_rec, el, type, inode);
3923 goto out_update_clusters;
3924 }
3925
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003926 right_path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
Mark Fashehdcd05382007-01-16 11:32:23 -08003927 if (!right_path) {
3928 ret = -ENOMEM;
3929 mlog_errno(ret);
3930 goto out;
3931 }
3932
3933 /*
3934 * Determine the path to start with. Rotations need the
3935 * rightmost path, everything else can go directly to the
3936 * target leaf.
3937 */
3938 cpos = le32_to_cpu(insert_rec->e_cpos);
3939 if (type->ins_appending == APPEND_NONE &&
3940 type->ins_contig == CONTIG_NONE) {
3941 rotate = 1;
3942 cpos = UINT_MAX;
3943 }
3944
3945 ret = ocfs2_find_path(inode, right_path, cpos);
3946 if (ret) {
3947 mlog_errno(ret);
3948 goto out;
3949 }
3950
3951 /*
3952 * Rotations and appends need special treatment - they modify
3953 * parts of the tree above them.
3954 *
3955 * Both might pass back a path immediately to the left of the
3956 * one being inserted to. This will cause
3957 * ocfs2_insert_path() to modify the rightmost records of
3958 * left_path to account for an edge insert.
3959 *
3960 * XXX: When modifying this code, keep in mind that an insert
3961 * can wind up skipping both of these two special cases...
3962 */
3963 if (rotate) {
Mark Fasheh328d5752007-06-18 10:48:04 -07003964 ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split,
Mark Fashehdcd05382007-01-16 11:32:23 -08003965 le32_to_cpu(insert_rec->e_cpos),
3966 right_path, &left_path);
3967 if (ret) {
3968 mlog_errno(ret);
3969 goto out;
3970 }
Mark Fashehe8aed342007-12-03 16:43:01 -08003971
3972 /*
3973 * ocfs2_rotate_tree_right() might have extended the
3974 * transaction without re-journaling our tree root.
3975 */
Joel Beckerce1d9ea2008-08-20 16:30:07 -07003976 ret = ocfs2_journal_access(handle, inode, et->et_root_bh,
Mark Fashehe8aed342007-12-03 16:43:01 -08003977 OCFS2_JOURNAL_ACCESS_WRITE);
3978 if (ret) {
3979 mlog_errno(ret);
3980 goto out;
3981 }
Mark Fashehdcd05382007-01-16 11:32:23 -08003982 } else if (type->ins_appending == APPEND_TAIL
3983 && type->ins_contig != CONTIG_LEFT) {
3984 ret = ocfs2_append_rec_to_path(inode, handle, insert_rec,
3985 right_path, &left_path);
3986 if (ret) {
3987 mlog_errno(ret);
3988 goto out;
3989 }
3990 }
3991
3992 ret = ocfs2_insert_path(inode, handle, left_path, right_path,
3993 insert_rec, type);
3994 if (ret) {
3995 mlog_errno(ret);
3996 goto out;
3997 }
3998
3999out_update_clusters:
Mark Fasheh328d5752007-06-18 10:48:04 -07004000 if (type->ins_split == SPLIT_NONE)
Joel Becker35dc0aa2008-08-20 16:25:06 -07004001 ocfs2_et_update_clusters(inode, et,
4002 le16_to_cpu(insert_rec->e_leaf_clusters));
Mark Fashehdcd05382007-01-16 11:32:23 -08004003
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004004 ret = ocfs2_journal_dirty(handle, et->et_root_bh);
Mark Fashehdcd05382007-01-16 11:32:23 -08004005 if (ret)
4006 mlog_errno(ret);
4007
4008out:
4009 ocfs2_free_path(left_path);
4010 ocfs2_free_path(right_path);
4011
4012 return ret;
4013}
4014
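/*
 * Figure out how a record being split is contiguous with its
 * neighbors, looking into the adjacent left or right leaf when the
 * record sits at the edge of its own leaf. Returns CONTIG_NONE,
 * CONTIG_LEFT, CONTIG_RIGHT or CONTIG_LEFTRIGHT.
 */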
Mark Fasheh328d5752007-06-18 10:48:04 -07004015static enum ocfs2_contig_type
Tao Maad5a4d72008-01-30 14:21:32 +08004016ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
Mark Fasheh328d5752007-06-18 10:48:04 -07004017 struct ocfs2_extent_list *el, int index,
4018 struct ocfs2_extent_rec *split_rec)
4019{
Tao Maad5a4d72008-01-30 14:21:32 +08004020 int status;
Mark Fasheh328d5752007-06-18 10:48:04 -07004021 enum ocfs2_contig_type ret = CONTIG_NONE;
Tao Maad5a4d72008-01-30 14:21:32 +08004022 u32 left_cpos, right_cpos;
4023 struct ocfs2_extent_rec *rec = NULL;
4024 struct ocfs2_extent_list *new_el;
4025 struct ocfs2_path *left_path = NULL, *right_path = NULL;
4026 struct buffer_head *bh;
4027 struct ocfs2_extent_block *eb;
4028
4029 if (index > 0) {
4030 rec = &el->l_recs[index - 1];
4031 } else if (path->p_tree_depth > 0) {
4032 status = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
4033 path, &left_cpos);
4034 if (status)
4035 goto out;
4036
4037 if (left_cpos != 0) {
4038 left_path = ocfs2_new_path(path_root_bh(path),
4039 path_root_el(path));
4040 if (!left_path)
4041 goto out;
4042
4043 status = ocfs2_find_path(inode, left_path, left_cpos);
4044 if (status)
4045 goto out;
4046
4047 new_el = path_leaf_el(left_path);
4048
4049 if (le16_to_cpu(new_el->l_next_free_rec) !=
4050 le16_to_cpu(new_el->l_count)) {
4051 bh = path_leaf_bh(left_path);
4052 eb = (struct ocfs2_extent_block *)bh->b_data;
4053 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
4054 eb);
4055 goto out;
4056 }
4057 rec = &new_el->l_recs[
4058 le16_to_cpu(new_el->l_next_free_rec) - 1];
4059 }
4060 }
Mark Fasheh328d5752007-06-18 10:48:04 -07004061
4062 /*
4063 * We're careful to check for an empty extent record here -
4064 * the merge code will know what to do if it sees one.
4065 */
Tao Maad5a4d72008-01-30 14:21:32 +08004066 if (rec) {
Mark Fasheh328d5752007-06-18 10:48:04 -07004067 if (index == 1 && ocfs2_is_empty_extent(rec)) {
4068 if (split_rec->e_cpos == el->l_recs[index].e_cpos)
4069 ret = CONTIG_RIGHT;
4070 } else {
4071 ret = ocfs2_extent_contig(inode, rec, split_rec);
4072 }
4073 }
4074
Tao Maad5a4d72008-01-30 14:21:32 +08004075 rec = NULL;
4076 if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
4077 rec = &el->l_recs[index + 1];
4078 else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
4079 path->p_tree_depth > 0) {
4080 status = ocfs2_find_cpos_for_right_leaf(inode->i_sb,
4081 path, &right_cpos);
4082 if (status)
4083 goto out;
4084
4085 if (right_cpos == 0)
4086 goto out;
4087
4088 right_path = ocfs2_new_path(path_root_bh(path),
4089 path_root_el(path));
4090 if (!right_path)
4091 goto out;
4092
4093 status = ocfs2_find_path(inode, right_path, right_cpos);
4094 if (status)
4095 goto out;
4096
4097 new_el = path_leaf_el(right_path);
4098 rec = &new_el->l_recs[0];
4099 if (ocfs2_is_empty_extent(rec)) {
4100 if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
4101 bh = path_leaf_bh(right_path);
4102 eb = (struct ocfs2_extent_block *)bh->b_data;
4103 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
4104 eb);
4105 goto out;
4106 }
4107 rec = &new_el->l_recs[1];
4108 }
4109 }
4110
4111 if (rec) {
Mark Fasheh328d5752007-06-18 10:48:04 -07004112 enum ocfs2_contig_type contig_type;
4113
Mark Fasheh328d5752007-06-18 10:48:04 -07004114 contig_type = ocfs2_extent_contig(inode, rec, split_rec);
4115
4116 if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
4117 ret = CONTIG_LEFTRIGHT;
4118 else if (ret == CONTIG_NONE)
4119 ret = contig_type;
4120 }
4121
Tao Maad5a4d72008-01-30 14:21:32 +08004122out:
4123 if (left_path)
4124 ocfs2_free_path(left_path);
4125 if (right_path)
4126 ocfs2_free_path(right_path);
4127
Mark Fasheh328d5752007-06-18 10:48:04 -07004128 return ret;
4129}
4130
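/*
 * Scan a leaf extent list for a record that insert_rec is contiguous
 * with, recording the index and contig type in 'insert'. Contiguity is
 * discarded if the merged record would exceed et_max_leaf_clusters.
 */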
Mark Fashehdcd05382007-01-16 11:32:23 -08004131static void ocfs2_figure_contig_type(struct inode *inode,
4132 struct ocfs2_insert_type *insert,
4133 struct ocfs2_extent_list *el,
Tao Maca12b7c2008-08-18 17:38:52 +08004134 struct ocfs2_extent_rec *insert_rec,
4135 struct ocfs2_extent_tree *et)
Mark Fashehdcd05382007-01-16 11:32:23 -08004136{
4137 int i;
4138 enum ocfs2_contig_type contig_type = CONTIG_NONE;
4139
Mark Fashehe48edee2007-03-07 16:46:57 -08004140 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
4141
Mark Fashehdcd05382007-01-16 11:32:23 -08004142 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
4143 contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
4144 insert_rec);
4145 if (contig_type != CONTIG_NONE) {
4146 insert->ins_contig_index = i;
4147 break;
4148 }
4149 }
4150 insert->ins_contig = contig_type;
Tao Maca12b7c2008-08-18 17:38:52 +08004151
4152 if (insert->ins_contig != CONTIG_NONE) {
4153 struct ocfs2_extent_rec *rec =
4154 &el->l_recs[insert->ins_contig_index];
4155 unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
4156 le16_to_cpu(insert_rec->e_leaf_clusters);
4157
4158 /*
4159 * Caller might want us to limit the size of extents, don't
4160 * calculate contiguousness if we might exceed that limit.
4161 */
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004162 if (et->et_max_leaf_clusters &&
4163 (len > et->et_max_leaf_clusters))
Tao Maca12b7c2008-08-18 17:38:52 +08004164 insert->ins_contig = CONTIG_NONE;
4165 }
Mark Fashehdcd05382007-01-16 11:32:23 -08004166}
4167
4168/*
4169 * This should only be called against the rightmost leaf extent list.
4170 *
4171 * ocfs2_figure_appending_type() will figure out whether we'll have to
4172 * insert at the tail of the rightmost leaf.
4173 *
Tao Mae7d4cb62008-08-18 17:38:44 +08004174 * This should also work against the root extent list for trees with 0
4175 * depth. If we consider the root extent list to be the rightmost leaf node
Mark Fashehdcd05382007-01-16 11:32:23 -08004176 * then the logic here makes sense.
4177 */
4178static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
4179 struct ocfs2_extent_list *el,
4180 struct ocfs2_extent_rec *insert_rec)
4181{
4182 int i;
4183 u32 cpos = le32_to_cpu(insert_rec->e_cpos);
4184 struct ocfs2_extent_rec *rec;
4185
4186 insert->ins_appending = APPEND_NONE;
4187
Mark Fashehe48edee2007-03-07 16:46:57 -08004188 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
Mark Fashehdcd05382007-01-16 11:32:23 -08004189
4190 if (!el->l_next_free_rec)
4191 goto set_tail_append;
4192
4193 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
4194 /* Were all records empty? */
4195 if (le16_to_cpu(el->l_next_free_rec) == 1)
4196 goto set_tail_append;
4197 }
4198
4199 i = le16_to_cpu(el->l_next_free_rec) - 1;
4200 rec = &el->l_recs[i];
4201
Mark Fashehe48edee2007-03-07 16:46:57 -08004202 if (cpos >=
4203 (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
Mark Fashehdcd05382007-01-16 11:32:23 -08004204 goto set_tail_append;
4205
4206 return;
4207
4208set_tail_append:
4209 insert->ins_appending = APPEND_TAIL;
4210}
4211
4212/*
4213 * Helper function called at the beginning of an insert.
4214 *
4215 * This computes a few things that are commonly used in the process of
4216 * inserting into the btree:
4217 * - Whether the new extent is contiguous with an existing one.
4218 * - The current tree depth.
4219 * - Whether the insert is an appending one.
4220 * - The total # of free records in the tree.
4221 *
4222 * All of the information is stored on the ocfs2_insert_type
4223 * structure.
4224 */
4225static int ocfs2_figure_insert_type(struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +08004226 struct ocfs2_extent_tree *et,
Mark Fashehdcd05382007-01-16 11:32:23 -08004227 struct buffer_head **last_eb_bh,
4228 struct ocfs2_extent_rec *insert_rec,
Tao Maoc77534f2007-08-28 17:22:33 -07004229 int *free_records,
Mark Fashehdcd05382007-01-16 11:32:23 -08004230 struct ocfs2_insert_type *insert)
4231{
4232 int ret;
Mark Fashehdcd05382007-01-16 11:32:23 -08004233 struct ocfs2_extent_block *eb;
4234 struct ocfs2_extent_list *el;
4235 struct ocfs2_path *path = NULL;
4236 struct buffer_head *bh = NULL;
4237
Mark Fasheh328d5752007-06-18 10:48:04 -07004238 insert->ins_split = SPLIT_NONE;
4239
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004240 el = et->et_root_el;
Mark Fashehdcd05382007-01-16 11:32:23 -08004241 insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
4242
4243 if (el->l_tree_depth) {
4244 /*
4245 * If we have tree depth, we read in the
4246 * rightmost extent block ahead of time as
4247 * ocfs2_figure_insert_type() and ocfs2_add_branch()
4248 * may want it later.
4249 */
4250 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
Joel Becker35dc0aa2008-08-20 16:25:06 -07004251 ocfs2_et_get_last_eb_blk(et), &bh,
Mark Fashehdcd05382007-01-16 11:32:23 -08004252 OCFS2_BH_CACHED, inode);
4253 if (ret) {
4254 mlog_exit(ret);
4255 goto out;
4256 }
4257 eb = (struct ocfs2_extent_block *) bh->b_data;
4258 el = &eb->h_list;
4259 }
4260
4261 /*
4262 * Unless we have a contiguous insert, we'll need to know if
4263 * there is room left in our allocation tree for another
4264 * extent record.
4265 *
4266 * XXX: This test is simplistic, we can search for empty
4267 * extent records too.
4268 */
Tao Maoc77534f2007-08-28 17:22:33 -07004269 *free_records = le16_to_cpu(el->l_count) -
Mark Fashehdcd05382007-01-16 11:32:23 -08004270 le16_to_cpu(el->l_next_free_rec);
4271
4272 if (!insert->ins_tree_depth) {
Tao Maca12b7c2008-08-18 17:38:52 +08004273 ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
Mark Fashehdcd05382007-01-16 11:32:23 -08004274 ocfs2_figure_appending_type(insert, el, insert_rec);
4275 return 0;
4276 }
4277
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004278 path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
Mark Fashehdcd05382007-01-16 11:32:23 -08004279 if (!path) {
4280 ret = -ENOMEM;
4281 mlog_errno(ret);
4282 goto out;
4283 }
4284
4285 /*
4286 * In the case that we're inserting past what the tree
4287 * currently accounts for, ocfs2_find_path() will return for
4288 * us the rightmost tree path. This is accounted for below in
4289 * the appending code.
4290 */
4291 ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos));
4292 if (ret) {
4293 mlog_errno(ret);
4294 goto out;
4295 }
4296
4297 el = path_leaf_el(path);
4298
4299 /*
4300 * Now that we have the path, there's two things we want to determine:
4301 * 1) Contiguousness (also set contig_index if this is so)
4302 *
4303 * 2) Are we doing an append? We can trivially break this up
4304 * into two types of appends: simple record append, or a
4305 * rotate inside the tail leaf.
4306 */
Tao Maca12b7c2008-08-18 17:38:52 +08004307 ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
Mark Fashehdcd05382007-01-16 11:32:23 -08004308
4309 /*
4310 * The insert code isn't quite ready to deal with all cases of
4311 * left contiguousness. Specifically, if it's an insert into
4312 * the 1st record in a leaf, it will require the adjustment of
Mark Fashehe48edee2007-03-07 16:46:57 -08004313 * cluster count on the last record of the path directly to its
Mark Fashehdcd05382007-01-16 11:32:23 -08004314 * left. For now, just catch that case and fool the layers
4315 * above us. This works just fine for tree_depth == 0, which
4316 * is why we allow that above.
4317 */
4318 if (insert->ins_contig == CONTIG_LEFT &&
4319 insert->ins_contig_index == 0)
4320 insert->ins_contig = CONTIG_NONE;
4321
4322 /*
4323 * Ok, so we can simply compare against last_eb to figure out
4324 * whether the path doesn't exist. This will only happen in
4325 * the case that we're doing a tail append, so maybe we can
4326 * take advantage of that information somehow.
4327 */
Joel Becker35dc0aa2008-08-20 16:25:06 -07004328 if (ocfs2_et_get_last_eb_blk(et) ==
Tao Mae7d4cb62008-08-18 17:38:44 +08004329 path_leaf_bh(path)->b_blocknr) {
Mark Fashehdcd05382007-01-16 11:32:23 -08004330 /*
4331 * Ok, ocfs2_find_path() returned us the rightmost
4332 * tree path. This might be an appending insert. There are
4333 * two cases:
4334 * 1) We're doing a true append at the tail:
4335 * -This might even be off the end of the leaf
4336 * 2) We're "appending" by rotating in the tail
4337 */
4338 ocfs2_figure_appending_type(insert, el, insert_rec);
4339 }
4340
4341out:
4342 ocfs2_free_path(path);
4343
4344 if (ret == 0)
4345 *last_eb_bh = bh;
4346 else
4347 brelse(bh);
4348 return ret;
4349}
4350
4351/*
4352 * Insert an extent into an inode btree.
4353 *
4354 * The caller needs to update fe->i_clusters
4355 */
Tao Maf56654c2008-08-18 17:38:48 +08004356static int ocfs2_insert_extent(struct ocfs2_super *osb,
4357 handle_t *handle,
4358 struct inode *inode,
4359 struct buffer_head *root_bh,
4360 u32 cpos,
4361 u64 start_blk,
4362 u32 new_clusters,
4363 u8 flags,
4364 struct ocfs2_alloc_context *meta_ac,
4365 struct ocfs2_extent_tree *et)
Mark Fashehccd979b2005-12-15 14:31:24 -08004366{
Mark Fashehc3afcbb2007-05-29 14:28:51 -07004367 int status;
Tao Maoc77534f2007-08-28 17:22:33 -07004368 int uninitialized_var(free_records);
Mark Fashehccd979b2005-12-15 14:31:24 -08004369 struct buffer_head *last_eb_bh = NULL;
Mark Fashehdcd05382007-01-16 11:32:23 -08004370 struct ocfs2_insert_type insert = {0, };
4371 struct ocfs2_extent_rec rec;
Mark Fashehccd979b2005-12-15 14:31:24 -08004372
Mark Fasheh1afc32b2007-09-07 14:46:51 -07004373 BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
4374
Mark Fashehdcd05382007-01-16 11:32:23 -08004375 mlog(0, "add %u clusters at position %u to inode %llu\n",
4376 new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
Mark Fashehccd979b2005-12-15 14:31:24 -08004377
Mark Fashehdcd05382007-01-16 11:32:23 -08004378 mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
4379 (OCFS2_I(inode)->ip_clusters != cpos),
4380 "Device %s, asking for sparse allocation: inode %llu, "
4381 "cpos %u, clusters %u\n",
4382 osb->dev_str,
4383 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos,
4384 OCFS2_I(inode)->ip_clusters);
Mark Fashehccd979b2005-12-15 14:31:24 -08004385
Mark Fashehe48edee2007-03-07 16:46:57 -08004386 memset(&rec, 0, sizeof(rec));
Mark Fashehdcd05382007-01-16 11:32:23 -08004387 rec.e_cpos = cpu_to_le32(cpos);
4388 rec.e_blkno = cpu_to_le64(start_blk);
Mark Fashehe48edee2007-03-07 16:46:57 -08004389 rec.e_leaf_clusters = cpu_to_le16(new_clusters);
Mark Fasheh2ae99a62007-03-09 16:43:28 -08004390 rec.e_flags = flags;
Mark Fashehccd979b2005-12-15 14:31:24 -08004391
Tao Mae7d4cb62008-08-18 17:38:44 +08004392 status = ocfs2_figure_insert_type(inode, et, &last_eb_bh, &rec,
Tao Maoc77534f2007-08-28 17:22:33 -07004393 &free_records, &insert);
Mark Fashehdcd05382007-01-16 11:32:23 -08004394 if (status < 0) {
4395 mlog_errno(status);
4396 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08004397 }
4398
Mark Fashehdcd05382007-01-16 11:32:23 -08004399 mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
4400 "Insert.contig_index: %d, Insert.free_records: %d, "
4401 "Insert.tree_depth: %d\n",
4402 insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
Tao Maoc77534f2007-08-28 17:22:33 -07004403 free_records, insert.ins_tree_depth);
Mark Fashehccd979b2005-12-15 14:31:24 -08004404
Tao Maoc77534f2007-08-28 17:22:33 -07004405 if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
Tao Mae7d4cb62008-08-18 17:38:44 +08004406 status = ocfs2_grow_tree(inode, handle, et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004407 &insert.ins_tree_depth, &last_eb_bh,
Mark Fashehc3afcbb2007-05-29 14:28:51 -07004408 meta_ac);
4409 if (status) {
Mark Fashehccd979b2005-12-15 14:31:24 -08004410 mlog_errno(status);
4411 goto bail;
4412 }
Mark Fashehccd979b2005-12-15 14:31:24 -08004413 }
4414
Mark Fashehdcd05382007-01-16 11:32:23 -08004415 /* Finally, we can add clusters. This might rotate the tree for us. */
Tao Mae7d4cb62008-08-18 17:38:44 +08004416 status = ocfs2_do_insert_extent(inode, handle, et, &rec, &insert);
Mark Fashehccd979b2005-12-15 14:31:24 -08004417 if (status < 0)
4418 mlog_errno(status);
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004419 else if (et->et_type == OCFS2_DINODE_EXTENT)
Mark Fasheh83418972007-04-23 18:53:12 -07004420 ocfs2_extent_map_insert_rec(inode, &rec);
Mark Fashehccd979b2005-12-15 14:31:24 -08004421
4422bail:
Mark Fashehccd979b2005-12-15 14:31:24 -08004423 if (last_eb_bh)
4424 brelse(last_eb_bh);
4425
Tao Maf56654c2008-08-18 17:38:48 +08004426 mlog_exit(status);
4427 return status;
4428}
4429
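/*
 * The three *_insert_extent() helpers below simply build the
 * appropriate ocfs2_extent_tree (dinode, xattr value or xattr tree
 * root) and hand off to ocfs2_insert_extent().
 */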
4430int ocfs2_dinode_insert_extent(struct ocfs2_super *osb,
4431 handle_t *handle,
4432 struct inode *inode,
4433 struct buffer_head *root_bh,
4434 u32 cpos,
4435 u64 start_blk,
4436 u32 new_clusters,
4437 u8 flags,
4438 struct ocfs2_alloc_context *meta_ac)
4439{
4440 int status;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004441 struct ocfs2_extent_tree et;
Tao Maf56654c2008-08-18 17:38:48 +08004442
Joel Beckerdc0ce612008-08-20 16:48:35 -07004443 ocfs2_get_extent_tree(&et, inode, root_bh, OCFS2_DINODE_EXTENT,
4444 NULL);
Tao Maf56654c2008-08-18 17:38:48 +08004445 status = ocfs2_insert_extent(osb, handle, inode, root_bh,
4446 cpos, start_blk, new_clusters,
Joel Beckerdc0ce612008-08-20 16:48:35 -07004447 flags, meta_ac, &et);
4448 ocfs2_put_extent_tree(&et);
Tao Maf56654c2008-08-18 17:38:48 +08004449
Tao Maf56654c2008-08-18 17:38:48 +08004450 return status;
4451}
4452
4453int ocfs2_xattr_value_insert_extent(struct ocfs2_super *osb,
4454 handle_t *handle,
4455 struct inode *inode,
4456 struct buffer_head *root_bh,
4457 u32 cpos,
4458 u64 start_blk,
4459 u32 new_clusters,
4460 u8 flags,
4461 struct ocfs2_alloc_context *meta_ac,
Joel Beckerea5efa12008-08-20 16:57:27 -07004462 void *obj)
Tao Maf56654c2008-08-18 17:38:48 +08004463{
4464 int status;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004465 struct ocfs2_extent_tree et;
Tao Maf56654c2008-08-18 17:38:48 +08004466
Joel Beckerdc0ce612008-08-20 16:48:35 -07004467 ocfs2_get_extent_tree(&et, inode, root_bh,
Joel Beckerea5efa12008-08-20 16:57:27 -07004468 OCFS2_XATTR_VALUE_EXTENT, obj);
Tao Maf56654c2008-08-18 17:38:48 +08004469 status = ocfs2_insert_extent(osb, handle, inode, root_bh,
4470 cpos, start_blk, new_clusters,
Joel Beckerdc0ce612008-08-20 16:48:35 -07004471 flags, meta_ac, &et);
4472 ocfs2_put_extent_tree(&et);
Tao Maf56654c2008-08-18 17:38:48 +08004473
Mark Fashehccd979b2005-12-15 14:31:24 -08004474 return status;
4475}
4476
Tao Maba492612008-08-18 17:38:49 +08004477int ocfs2_xattr_tree_insert_extent(struct ocfs2_super *osb,
4478 handle_t *handle,
4479 struct inode *inode,
4480 struct buffer_head *root_bh,
4481 u32 cpos,
4482 u64 start_blk,
4483 u32 new_clusters,
4484 u8 flags,
4485 struct ocfs2_alloc_context *meta_ac)
4486{
4487 int status;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004488 struct ocfs2_extent_tree et;
Tao Maba492612008-08-18 17:38:49 +08004489
Joel Beckerdc0ce612008-08-20 16:48:35 -07004490 ocfs2_get_extent_tree(&et, inode, root_bh, OCFS2_XATTR_TREE_EXTENT,
4491 NULL);
Tao Maba492612008-08-18 17:38:49 +08004492 status = ocfs2_insert_extent(osb, handle, inode, root_bh,
4493 cpos, start_blk, new_clusters,
Joel Beckerdc0ce612008-08-20 16:48:35 -07004494 flags, meta_ac, &et);
4495 ocfs2_put_extent_tree(&et);
Tao Maba492612008-08-18 17:38:49 +08004496
Tao Maba492612008-08-18 17:38:49 +08004497 return status;
4498}
4499
Tao Ma0eb8d472008-08-18 17:38:45 +08004500/*
4501 * Allocate and add clusters into the extent b-tree.
4502 * The new clusters (clusters_to_add) will be inserted at logical_offset.
4503 * The extent b-tree's root is root_el and it should be in root_bh; it
4504 * is not limited to file storage. Any extent tree can use this
4505 * function if it implements the proper ocfs2_extent_tree.
4506 */
4507int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
4508 struct inode *inode,
4509 u32 *logical_offset,
4510 u32 clusters_to_add,
4511 int mark_unwritten,
4512 struct buffer_head *root_bh,
4513 struct ocfs2_extent_list *root_el,
4514 handle_t *handle,
4515 struct ocfs2_alloc_context *data_ac,
4516 struct ocfs2_alloc_context *meta_ac,
4517 enum ocfs2_alloc_restarted *reason_ret,
Tao Maf56654c2008-08-18 17:38:48 +08004518 enum ocfs2_extent_tree_type type,
Joel Beckerea5efa12008-08-20 16:57:27 -07004519 void *obj)
Tao Ma0eb8d472008-08-18 17:38:45 +08004520{
4521 int status = 0;
4522 int free_extents;
4523 enum ocfs2_alloc_restarted reason = RESTART_NONE;
4524 u32 bit_off, num_bits;
4525 u64 block;
4526 u8 flags = 0;
4527
4528 BUG_ON(!clusters_to_add);
4529
4530 if (mark_unwritten)
4531 flags = OCFS2_EXT_UNWRITTEN;
4532
Tao Maf56654c2008-08-18 17:38:48 +08004533 free_extents = ocfs2_num_free_extents(osb, inode, root_bh, type,
Joel Beckerea5efa12008-08-20 16:57:27 -07004534 obj);
Tao Ma0eb8d472008-08-18 17:38:45 +08004535 if (free_extents < 0) {
4536 status = free_extents;
4537 mlog_errno(status);
4538 goto leave;
4539 }
4540
4541 /* there are two cases which could cause us to EAGAIN in the
4542 * we-need-more-metadata case:
4543 * 1) we haven't reserved *any*
4544 * 2) we are so fragmented, we've needed to add metadata too
4545 * many times. */
4546 if (!free_extents && !meta_ac) {
4547 mlog(0, "we haven't reserved any metadata!\n");
4548 status = -EAGAIN;
4549 reason = RESTART_META;
4550 goto leave;
4551 } else if ((!free_extents)
4552 && (ocfs2_alloc_context_bits_left(meta_ac)
4553 < ocfs2_extend_meta_needed(root_el))) {
4554 mlog(0, "filesystem is really fragmented...\n");
4555 status = -EAGAIN;
4556 reason = RESTART_META;
4557 goto leave;
4558 }
4559
4560 status = __ocfs2_claim_clusters(osb, handle, data_ac, 1,
4561 clusters_to_add, &bit_off, &num_bits);
4562 if (status < 0) {
4563 if (status != -ENOSPC)
4564 mlog_errno(status);
4565 goto leave;
4566 }
4567
4568 BUG_ON(num_bits > clusters_to_add);
4569
4570 /* reserve our write early -- insert_extent may update the inode */
4571 status = ocfs2_journal_access(handle, inode, root_bh,
4572 OCFS2_JOURNAL_ACCESS_WRITE);
4573 if (status < 0) {
4574 mlog_errno(status);
4575 goto leave;
4576 }
4577
4578 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
4579 mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
4580 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
Tao Maf56654c2008-08-18 17:38:48 +08004581 if (type == OCFS2_DINODE_EXTENT)
4582 status = ocfs2_dinode_insert_extent(osb, handle, inode, root_bh,
4583 *logical_offset, block,
4584 num_bits, flags, meta_ac);
Tao Maba492612008-08-18 17:38:49 +08004585 else if (type == OCFS2_XATTR_TREE_EXTENT)
4586 status = ocfs2_xattr_tree_insert_extent(osb, handle,
4587 inode, root_bh,
4588 *logical_offset,
4589 block, num_bits, flags,
4590 meta_ac);
Tao Maf56654c2008-08-18 17:38:48 +08004591 else
4592 status = ocfs2_xattr_value_insert_extent(osb, handle,
4593 inode, root_bh,
4594 *logical_offset,
4595 block, num_bits, flags,
Joel Beckerea5efa12008-08-20 16:57:27 -07004596 meta_ac, obj);
Tao Ma0eb8d472008-08-18 17:38:45 +08004597 if (status < 0) {
4598 mlog_errno(status);
4599 goto leave;
4600 }
4601
4602 status = ocfs2_journal_dirty(handle, root_bh);
4603 if (status < 0) {
4604 mlog_errno(status);
4605 goto leave;
4606 }
4607
4608 clusters_to_add -= num_bits;
4609 *logical_offset += num_bits;
4610
4611 if (clusters_to_add) {
4612 mlog(0, "need to alloc once more, wanted = %u\n",
4613 clusters_to_add);
4614 status = -EAGAIN;
4615 reason = RESTART_TRANS;
4616 }
4617
4618leave:
4619 mlog_exit(status);
4620 if (reason_ret)
4621 *reason_ret = reason;
4622 return status;
4623}
4624
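/*
 * Build the record describing the right-hand part of 'rec' when it is
 * split at 'cpos': everything from cpos to the end of the original
 * record, with e_blkno advanced to match.
 */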
Mark Fasheh328d5752007-06-18 10:48:04 -07004625static void ocfs2_make_right_split_rec(struct super_block *sb,
4626 struct ocfs2_extent_rec *split_rec,
4627 u32 cpos,
4628 struct ocfs2_extent_rec *rec)
4629{
4630 u32 rec_cpos = le32_to_cpu(rec->e_cpos);
4631 u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters);
4632
4633 memset(split_rec, 0, sizeof(struct ocfs2_extent_rec));
4634
4635 split_rec->e_cpos = cpu_to_le32(cpos);
4636 split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos);
4637
4638 split_rec->e_blkno = rec->e_blkno;
4639 le64_add_cpu(&split_rec->e_blkno,
4640 ocfs2_clusters_to_blocks(sb, cpos - rec_cpos));
4641
4642 split_rec->e_flags = rec->e_flags;
4643}
4644
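/*
 * Insert a split record which could not be merged with its neighbors.
 * A split in the middle of a record is faked as a right split followed
 * by a second, left-split pass, and the tree is grown first if the
 * rightmost extent list is already full.
 */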
4645static int ocfs2_split_and_insert(struct inode *inode,
4646 handle_t *handle,
4647 struct ocfs2_path *path,
Tao Mae7d4cb62008-08-18 17:38:44 +08004648 struct ocfs2_extent_tree *et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004649 struct buffer_head **last_eb_bh,
4650 int split_index,
4651 struct ocfs2_extent_rec *orig_split_rec,
4652 struct ocfs2_alloc_context *meta_ac)
4653{
4654 int ret = 0, depth;
4655 unsigned int insert_range, rec_range, do_leftright = 0;
4656 struct ocfs2_extent_rec tmprec;
4657 struct ocfs2_extent_list *rightmost_el;
4658 struct ocfs2_extent_rec rec;
4659 struct ocfs2_extent_rec split_rec = *orig_split_rec;
4660 struct ocfs2_insert_type insert;
4661 struct ocfs2_extent_block *eb;
Mark Fasheh328d5752007-06-18 10:48:04 -07004662
4663leftright:
4664 /*
4665 * Store a copy of the record on the stack - it might move
4666 * around as the tree is manipulated below.
4667 */
4668 rec = path_leaf_el(path)->l_recs[split_index];
4669
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004670 rightmost_el = et->et_root_el;
Mark Fasheh328d5752007-06-18 10:48:04 -07004671
4672 depth = le16_to_cpu(rightmost_el->l_tree_depth);
4673 if (depth) {
4674 BUG_ON(!(*last_eb_bh));
4675 eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
4676 rightmost_el = &eb->h_list;
4677 }
4678
4679 if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
4680 le16_to_cpu(rightmost_el->l_count)) {
Tao Mae7d4cb62008-08-18 17:38:44 +08004681 ret = ocfs2_grow_tree(inode, handle, et,
4682 &depth, last_eb_bh, meta_ac);
Mark Fasheh328d5752007-06-18 10:48:04 -07004683 if (ret) {
4684 mlog_errno(ret);
4685 goto out;
4686 }
Mark Fasheh328d5752007-06-18 10:48:04 -07004687 }
4688
4689 memset(&insert, 0, sizeof(struct ocfs2_insert_type));
4690 insert.ins_appending = APPEND_NONE;
4691 insert.ins_contig = CONTIG_NONE;
Mark Fasheh328d5752007-06-18 10:48:04 -07004692 insert.ins_tree_depth = depth;
4693
4694 insert_range = le32_to_cpu(split_rec.e_cpos) +
4695 le16_to_cpu(split_rec.e_leaf_clusters);
4696 rec_range = le32_to_cpu(rec.e_cpos) +
4697 le16_to_cpu(rec.e_leaf_clusters);
4698
4699 if (split_rec.e_cpos == rec.e_cpos) {
4700 insert.ins_split = SPLIT_LEFT;
4701 } else if (insert_range == rec_range) {
4702 insert.ins_split = SPLIT_RIGHT;
4703 } else {
4704 /*
4705 * Left/right split. We fake this as a right split
4706 * first and then make a second pass as a left split.
4707 */
4708 insert.ins_split = SPLIT_RIGHT;
4709
4710 ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range,
4711 &rec);
4712
4713 split_rec = tmprec;
4714
4715 BUG_ON(do_leftright);
4716 do_leftright = 1;
4717 }
4718
Tao Mae7d4cb62008-08-18 17:38:44 +08004719 ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert);
Mark Fasheh328d5752007-06-18 10:48:04 -07004720 if (ret) {
4721 mlog_errno(ret);
4722 goto out;
4723 }
4724
4725 if (do_leftright == 1) {
4726 u32 cpos;
4727 struct ocfs2_extent_list *el;
4728
4729 do_leftright++;
4730 split_rec = *orig_split_rec;
4731
4732 ocfs2_reinit_path(path, 1);
4733
4734 cpos = le32_to_cpu(split_rec.e_cpos);
4735 ret = ocfs2_find_path(inode, path, cpos);
4736 if (ret) {
4737 mlog_errno(ret);
4738 goto out;
4739 }
4740
4741 el = path_leaf_el(path);
4742 split_index = ocfs2_search_extent_list(el, cpos);
4743 goto leftright;
4744 }
4745out:
4746
4747 return ret;
4748}
4749
4750/*
4751 * Mark part or all of the extent record at split_index in the leaf
4752 * pointed to by path as written. This removes the unwritten
4753 * extent flag.
4754 *
4755 * Care is taken to handle contiguousness so as to not grow the tree.
4756 *
4757 * meta_ac is not strictly necessary - we only truly need it if growth
4758 * of the tree is required. All other cases will degrade into a less
4759 * optimal tree layout.
4760 *
Tao Mae7d4cb62008-08-18 17:38:44 +08004761 * last_eb_bh should be the rightmost leaf block for any extent
4762 * btree. Since a split may grow the tree or a merge might shrink it,
4763 * the caller cannot trust the contents of that buffer after this call.
Mark Fasheh328d5752007-06-18 10:48:04 -07004764 *
4765 * This code is optimized for readability - several passes might be
4766 * made over certain portions of the tree. All of those blocks will
4767 * have been brought into cache (and pinned via the journal), so the
4768 * extra overhead is not expressed in terms of disk reads.
4769 */
4770static int __ocfs2_mark_extent_written(struct inode *inode,
Tao Mae7d4cb62008-08-18 17:38:44 +08004771 struct ocfs2_extent_tree *et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004772 handle_t *handle,
4773 struct ocfs2_path *path,
4774 int split_index,
4775 struct ocfs2_extent_rec *split_rec,
4776 struct ocfs2_alloc_context *meta_ac,
4777 struct ocfs2_cached_dealloc_ctxt *dealloc)
4778{
4779 int ret = 0;
4780 struct ocfs2_extent_list *el = path_leaf_el(path);
Mark Fashehe8aed342007-12-03 16:43:01 -08004781 struct buffer_head *last_eb_bh = NULL;
Mark Fasheh328d5752007-06-18 10:48:04 -07004782 struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
4783 struct ocfs2_merge_ctxt ctxt;
4784 struct ocfs2_extent_list *rightmost_el;
4785
Roel Kluin3cf0c502007-10-27 00:20:36 +02004786 if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) {
Mark Fasheh328d5752007-06-18 10:48:04 -07004787 ret = -EIO;
4788 mlog_errno(ret);
4789 goto out;
4790 }
4791
4792 if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
4793 ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
4794 (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) {
4795 ret = -EIO;
4796 mlog_errno(ret);
4797 goto out;
4798 }
4799
Tao Maad5a4d72008-01-30 14:21:32 +08004800 ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el,
Mark Fasheh328d5752007-06-18 10:48:04 -07004801 split_index,
4802 split_rec);
4803
4804 /*
4805 * The core merge / split code wants to know how much room is
4806 * left in this inode's allocation tree, so we pass the
4807 * rightmost extent list.
4808 */
4809 if (path->p_tree_depth) {
4810 struct ocfs2_extent_block *eb;
Mark Fasheh328d5752007-06-18 10:48:04 -07004811
4812 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
Joel Becker35dc0aa2008-08-20 16:25:06 -07004813 ocfs2_et_get_last_eb_blk(et),
Mark Fasheh328d5752007-06-18 10:48:04 -07004814 &last_eb_bh, OCFS2_BH_CACHED, inode);
4815 if (ret) {
4816 mlog_exit(ret);
4817 goto out;
4818 }
4819
4820 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
4821 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
4822 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
4823 ret = -EROFS;
4824 goto out;
4825 }
4826
4827 rightmost_el = &eb->h_list;
4828 } else
4829 rightmost_el = path_root_el(path);
4830
Mark Fasheh328d5752007-06-18 10:48:04 -07004831 if (rec->e_cpos == split_rec->e_cpos &&
4832 rec->e_leaf_clusters == split_rec->e_leaf_clusters)
4833 ctxt.c_split_covers_rec = 1;
4834 else
4835 ctxt.c_split_covers_rec = 0;
4836
4837 ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
4838
Mark Fasheh015452b2007-09-12 10:21:22 -07004839 mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n",
4840 split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent,
4841 ctxt.c_split_covers_rec);
Mark Fasheh328d5752007-06-18 10:48:04 -07004842
4843 if (ctxt.c_contig_type == CONTIG_NONE) {
4844 if (ctxt.c_split_covers_rec)
4845 el->l_recs[split_index] = *split_rec;
4846 else
Tao Mae7d4cb62008-08-18 17:38:44 +08004847 ret = ocfs2_split_and_insert(inode, handle, path, et,
Mark Fasheh328d5752007-06-18 10:48:04 -07004848 &last_eb_bh, split_index,
4849 split_rec, meta_ac);
4850 if (ret)
4851 mlog_errno(ret);
4852 } else {
4853 ret = ocfs2_try_to_merge_extent(inode, handle, path,
4854 split_index, split_rec,
Tao Mae7d4cb62008-08-18 17:38:44 +08004855 dealloc, &ctxt, et);
Mark Fasheh328d5752007-06-18 10:48:04 -07004856 if (ret)
4857 mlog_errno(ret);
4858 }
4859
Mark Fasheh328d5752007-06-18 10:48:04 -07004860out:
4861 brelse(last_eb_bh);
4862 return ret;
4863}
4864
4865/*
4866 * Mark the already-existing extent at cpos as written for len clusters.
4867 *
4868 * If the existing extent is larger than the request, initiate a
4869 * split. An attempt will be made at merging with adjacent extents.
4870 *
4871 * The caller is responsible for passing down meta_ac if we'll need it.
4872 */
Tao Mae7d4cb62008-08-18 17:38:44 +08004873int ocfs2_mark_extent_written(struct inode *inode, struct buffer_head *root_bh,
Mark Fasheh328d5752007-06-18 10:48:04 -07004874 handle_t *handle, u32 cpos, u32 len, u32 phys,
4875 struct ocfs2_alloc_context *meta_ac,
Tao Mae7d4cb62008-08-18 17:38:44 +08004876 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Maf56654c2008-08-18 17:38:48 +08004877 enum ocfs2_extent_tree_type et_type,
Joel Beckerea5efa12008-08-20 16:57:27 -07004878 void *obj)
Mark Fasheh328d5752007-06-18 10:48:04 -07004879{
4880 int ret, index;
4881 u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys);
4882 struct ocfs2_extent_rec split_rec;
4883 struct ocfs2_path *left_path = NULL;
4884 struct ocfs2_extent_list *el;
Joel Beckerdc0ce612008-08-20 16:48:35 -07004885 struct ocfs2_extent_tree et;
Mark Fasheh328d5752007-06-18 10:48:04 -07004886
4887 mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n",
4888 inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno);
4889
Joel Beckerea5efa12008-08-20 16:57:27 -07004890 ocfs2_get_extent_tree(&et, inode, root_bh, et_type, obj);
Joel Beckerdc0ce612008-08-20 16:48:35 -07004891
Mark Fasheh328d5752007-06-18 10:48:04 -07004892 if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
4893 ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
4894 "that are being written to, but the feature bit "
4895 "is not set in the super block.",
4896 (unsigned long long)OCFS2_I(inode)->ip_blkno);
4897 ret = -EROFS;
4898 goto out;
4899 }
4900
4901 /*
4902 * XXX: This should be fixed up so that we just re-insert the
4903 * next extent records.
4904 */
Tao Mae7d4cb62008-08-18 17:38:44 +08004905 if (et_type == OCFS2_DINODE_EXTENT)
4906 ocfs2_extent_map_trunc(inode, 0);
Mark Fasheh328d5752007-06-18 10:48:04 -07004907
Joel Beckerdc0ce612008-08-20 16:48:35 -07004908 left_path = ocfs2_new_path(et.et_root_bh, et.et_root_el);
Mark Fasheh328d5752007-06-18 10:48:04 -07004909 if (!left_path) {
4910 ret = -ENOMEM;
4911 mlog_errno(ret);
4912 goto out;
4913 }
4914
4915 ret = ocfs2_find_path(inode, left_path, cpos);
4916 if (ret) {
4917 mlog_errno(ret);
4918 goto out;
4919 }
4920 el = path_leaf_el(left_path);
4921
4922 index = ocfs2_search_extent_list(el, cpos);
4923 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
4924 ocfs2_error(inode->i_sb,
4925 "Inode %llu has an extent at cpos %u which can no "
4926 "longer be found.\n",
4927 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
4928 ret = -EROFS;
4929 goto out;
4930 }
4931
4932 memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec));
4933 split_rec.e_cpos = cpu_to_le32(cpos);
4934 split_rec.e_leaf_clusters = cpu_to_le16(len);
4935 split_rec.e_blkno = cpu_to_le64(start_blkno);
4936 split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags;
4937 split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN;
4938
Joel Beckerdc0ce612008-08-20 16:48:35 -07004939 ret = __ocfs2_mark_extent_written(inode, &et, handle, left_path,
Tao Mae7d4cb62008-08-18 17:38:44 +08004940 index, &split_rec, meta_ac,
4941 dealloc);
Mark Fasheh328d5752007-06-18 10:48:04 -07004942 if (ret)
4943 mlog_errno(ret);
4944
4945out:
4946 ocfs2_free_path(left_path);
Joel Beckerdc0ce612008-08-20 16:48:35 -07004947 ocfs2_put_extent_tree(&et);
Mark Fasheh328d5752007-06-18 10:48:04 -07004948 return ret;
4949}
4950
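/*
 * Split the record at 'index' so that the clusters from new_range
 * onward become their own record, growing the tree beforehand if the
 * rightmost extent list has no free slots.
 */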
Tao Mae7d4cb62008-08-18 17:38:44 +08004951static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et,
Mark Fashehd0c7d702007-07-03 13:27:22 -07004952 handle_t *handle, struct ocfs2_path *path,
4953 int index, u32 new_range,
4954 struct ocfs2_alloc_context *meta_ac)
4955{
4956 int ret, depth, credits = handle->h_buffer_credits;
Mark Fashehd0c7d702007-07-03 13:27:22 -07004957 struct buffer_head *last_eb_bh = NULL;
4958 struct ocfs2_extent_block *eb;
4959 struct ocfs2_extent_list *rightmost_el, *el;
4960 struct ocfs2_extent_rec split_rec;
4961 struct ocfs2_extent_rec *rec;
4962 struct ocfs2_insert_type insert;
4963
4964 /*
4965 * Setup the record to split before we grow the tree.
4966 */
4967 el = path_leaf_el(path);
4968 rec = &el->l_recs[index];
4969 ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec);
4970
4971 depth = path->p_tree_depth;
4972 if (depth > 0) {
4973 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
Joel Becker35dc0aa2008-08-20 16:25:06 -07004974 ocfs2_et_get_last_eb_blk(et),
Mark Fashehd0c7d702007-07-03 13:27:22 -07004975 &last_eb_bh, OCFS2_BH_CACHED, inode);
4976 if (ret < 0) {
4977 mlog_errno(ret);
4978 goto out;
4979 }
4980
4981 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
4982 rightmost_el = &eb->h_list;
4983 } else
4984 rightmost_el = path_leaf_el(path);
4985
Tao Ma811f9332008-08-18 17:38:43 +08004986 credits += path->p_tree_depth +
Joel Beckerce1d9ea2008-08-20 16:30:07 -07004987 ocfs2_extend_meta_needed(et->et_root_el);
Mark Fashehd0c7d702007-07-03 13:27:22 -07004988 ret = ocfs2_extend_trans(handle, credits);
4989 if (ret) {
4990 mlog_errno(ret);
4991 goto out;
4992 }
4993
4994 if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
4995 le16_to_cpu(rightmost_el->l_count)) {
Tao Mae7d4cb62008-08-18 17:38:44 +08004996 ret = ocfs2_grow_tree(inode, handle, et, &depth, &last_eb_bh,
Mark Fashehd0c7d702007-07-03 13:27:22 -07004997 meta_ac);
4998 if (ret) {
4999 mlog_errno(ret);
5000 goto out;
5001 }
Mark Fashehd0c7d702007-07-03 13:27:22 -07005002 }
5003
5004 memset(&insert, 0, sizeof(struct ocfs2_insert_type));
5005 insert.ins_appending = APPEND_NONE;
5006 insert.ins_contig = CONTIG_NONE;
5007 insert.ins_split = SPLIT_RIGHT;
Mark Fashehd0c7d702007-07-03 13:27:22 -07005008 insert.ins_tree_depth = depth;
5009
Tao Mae7d4cb62008-08-18 17:38:44 +08005010 ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005011 if (ret)
5012 mlog_errno(ret);
5013
5014out:
5015 brelse(last_eb_bh);
5016 return ret;
5017}
5018
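/*
 * Remove 'len' clusters starting at 'cpos' from the record at
 * 'index' in the leaf pointed to by 'path'. This handles removal of
 * a whole record and truncation of its left or right edge; a range
 * in the middle of a record must be split by the caller first. A
 * left rotation afterwards fills any hole left in the leaf.
 */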
5019static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
5020 struct ocfs2_path *path, int index,
5021 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Mae7d4cb62008-08-18 17:38:44 +08005022 u32 cpos, u32 len,
5023 struct ocfs2_extent_tree *et)
Mark Fashehd0c7d702007-07-03 13:27:22 -07005024{
5025 int ret;
5026 u32 left_cpos, rec_range, trunc_range;
5027 int wants_rotate = 0, is_rightmost_tree_rec = 0;
5028 struct super_block *sb = inode->i_sb;
5029 struct ocfs2_path *left_path = NULL;
5030 struct ocfs2_extent_list *el = path_leaf_el(path);
5031 struct ocfs2_extent_rec *rec;
5032 struct ocfs2_extent_block *eb;
5033
5034 if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
Tao Mae7d4cb62008-08-18 17:38:44 +08005035 ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005036 if (ret) {
5037 mlog_errno(ret);
5038 goto out;
5039 }
5040
5041 index--;
5042 }
5043
5044 if (index == (le16_to_cpu(el->l_next_free_rec) - 1) &&
5045 path->p_tree_depth) {
5046 /*
5047 * Check whether this is the rightmost tree record. If
5048 * we remove all of this record or part of its right
5049 * edge then an update of the record lengths above it
5050 * will be required.
5051 */
5052 eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
5053 if (eb->h_next_leaf_blk == 0)
5054 is_rightmost_tree_rec = 1;
5055 }
5056
5057 rec = &el->l_recs[index];
5058 if (index == 0 && path->p_tree_depth &&
5059 le32_to_cpu(rec->e_cpos) == cpos) {
5060 /*
5061 * Changing the leftmost offset (via partial or whole
5062 * record truncate) of an interior (or rightmost) path
5063 * means we have to update the subtree that is formed
5064		 * by this leaf and the one to its left.
5065 *
5066 * There are two cases we can skip:
5067 * 1) Path is the leftmost one in our inode tree.
5068 * 2) The leaf is rightmost and will be empty after
5069 * we remove the extent record - the rotate code
5070 * knows how to update the newly formed edge.
5071 */
5072
5073 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path,
5074 &left_cpos);
5075 if (ret) {
5076 mlog_errno(ret);
5077 goto out;
5078 }
5079
5080 if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) {
5081 left_path = ocfs2_new_path(path_root_bh(path),
5082 path_root_el(path));
5083 if (!left_path) {
5084 ret = -ENOMEM;
5085 mlog_errno(ret);
5086 goto out;
5087 }
5088
5089 ret = ocfs2_find_path(inode, left_path, left_cpos);
5090 if (ret) {
5091 mlog_errno(ret);
5092 goto out;
5093 }
5094 }
5095 }
5096
5097 ret = ocfs2_extend_rotate_transaction(handle, 0,
5098 handle->h_buffer_credits,
5099 path);
5100 if (ret) {
5101 mlog_errno(ret);
5102 goto out;
5103 }
5104
5105 ret = ocfs2_journal_access_path(inode, handle, path);
5106 if (ret) {
5107 mlog_errno(ret);
5108 goto out;
5109 }
5110
5111 ret = ocfs2_journal_access_path(inode, handle, left_path);
5112 if (ret) {
5113 mlog_errno(ret);
5114 goto out;
5115 }
5116
5117 rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
5118 trunc_range = cpos + len;
5119
5120 if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) {
5121 int next_free;
5122
5123 memset(rec, 0, sizeof(*rec));
5124 ocfs2_cleanup_merge(el, index);
5125 wants_rotate = 1;
5126
5127 next_free = le16_to_cpu(el->l_next_free_rec);
5128 if (is_rightmost_tree_rec && next_free > 1) {
5129 /*
5130 * We skip the edge update if this path will
5131 * be deleted by the rotate code.
5132 */
5133 rec = &el->l_recs[next_free - 1];
5134 ocfs2_adjust_rightmost_records(inode, handle, path,
5135 rec);
5136 }
5137 } else if (le32_to_cpu(rec->e_cpos) == cpos) {
5138 /* Remove leftmost portion of the record. */
5139 le32_add_cpu(&rec->e_cpos, len);
5140 le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len));
5141 le16_add_cpu(&rec->e_leaf_clusters, -len);
5142 } else if (rec_range == trunc_range) {
5143 /* Remove rightmost portion of the record */
5144 le16_add_cpu(&rec->e_leaf_clusters, -len);
5145 if (is_rightmost_tree_rec)
5146 ocfs2_adjust_rightmost_records(inode, handle, path, rec);
5147 } else {
5148 /* Caller should have trapped this. */
5149 mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) "
5150 "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno,
5151 le32_to_cpu(rec->e_cpos),
5152 le16_to_cpu(rec->e_leaf_clusters), cpos, len);
5153 BUG();
5154 }
5155
5156 if (left_path) {
5157 int subtree_index;
5158
5159 subtree_index = ocfs2_find_subtree_root(inode, left_path, path);
5160 ocfs2_complete_edge_insert(inode, handle, left_path, path,
5161 subtree_index);
5162 }
5163
5164 ocfs2_journal_dirty(handle, path_leaf_bh(path));
5165
Tao Mae7d4cb62008-08-18 17:38:44 +08005166 ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005167 if (ret) {
5168 mlog_errno(ret);
5169 goto out;
5170 }
5171
5172out:
5173 ocfs2_free_path(left_path);
5174 return ret;
5175}
5176
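/*
 * Top-level removal of a cluster range from an extent tree. See the
 * case breakdown in the body below; a range that touches neither
 * edge of its record is split first so the edge case can be reused.
 */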
Tao Mae7d4cb62008-08-18 17:38:44 +08005177int ocfs2_remove_extent(struct inode *inode, struct buffer_head *root_bh,
Mark Fasheh063c4562007-07-03 13:34:11 -07005178 u32 cpos, u32 len, handle_t *handle,
5179 struct ocfs2_alloc_context *meta_ac,
Tao Mae7d4cb62008-08-18 17:38:44 +08005180 struct ocfs2_cached_dealloc_ctxt *dealloc,
Tao Maf56654c2008-08-18 17:38:48 +08005181 enum ocfs2_extent_tree_type et_type,
Joel Beckerea5efa12008-08-20 16:57:27 -07005182 void *obj)
Mark Fashehd0c7d702007-07-03 13:27:22 -07005183{
5184 int ret, index;
5185 u32 rec_range, trunc_range;
5186 struct ocfs2_extent_rec *rec;
5187 struct ocfs2_extent_list *el;
Tao Mae7d4cb62008-08-18 17:38:44 +08005188 struct ocfs2_path *path = NULL;
Joel Beckerdc0ce612008-08-20 16:48:35 -07005189 struct ocfs2_extent_tree et;
Tao Mae7d4cb62008-08-18 17:38:44 +08005190
Joel Beckerea5efa12008-08-20 16:57:27 -07005191 ocfs2_get_extent_tree(&et, inode, root_bh, et_type, obj);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005192
5193 ocfs2_extent_map_trunc(inode, 0);
5194
Joel Beckerdc0ce612008-08-20 16:48:35 -07005195 path = ocfs2_new_path(et.et_root_bh, et.et_root_el);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005196 if (!path) {
5197 ret = -ENOMEM;
5198 mlog_errno(ret);
5199 goto out;
5200 }
5201
5202 ret = ocfs2_find_path(inode, path, cpos);
5203 if (ret) {
5204 mlog_errno(ret);
5205 goto out;
5206 }
5207
5208 el = path_leaf_el(path);
5209 index = ocfs2_search_extent_list(el, cpos);
5210 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
5211 ocfs2_error(inode->i_sb,
5212 "Inode %llu has an extent at cpos %u which can no "
5213 "longer be found.\n",
5214 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
5215 ret = -EROFS;
5216 goto out;
5217 }
5218
5219 /*
5220 * We have 3 cases of extent removal:
5221 * 1) Range covers the entire extent rec
5222 * 2) Range begins or ends on one edge of the extent rec
5223 * 3) Range is in the middle of the extent rec (no shared edges)
5224 *
5225 * For case 1 we remove the extent rec and left rotate to
5226 * fill the hole.
5227 *
5228 * For case 2 we just shrink the existing extent rec, with a
5229 * tree update if the shrinking edge is also the edge of an
5230 * extent block.
5231 *
5232 * For case 3 we do a right split to turn the extent rec into
5233 * something case 2 can handle.
5234 */
5235 rec = &el->l_recs[index];
5236 rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
5237 trunc_range = cpos + len;
5238
5239 BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
5240
5241 mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d "
5242 "(cpos %u, len %u)\n",
5243 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index,
5244 le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec));
5245
5246 if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
5247 ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc,
Joel Beckerdc0ce612008-08-20 16:48:35 -07005248 cpos, len, &et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005249 if (ret) {
5250 mlog_errno(ret);
5251 goto out;
5252 }
5253 } else {
Joel Beckerdc0ce612008-08-20 16:48:35 -07005254 ret = ocfs2_split_tree(inode, &et, handle, path, index,
Mark Fashehd0c7d702007-07-03 13:27:22 -07005255 trunc_range, meta_ac);
5256 if (ret) {
5257 mlog_errno(ret);
5258 goto out;
5259 }
5260
5261 /*
5262 * The split could have manipulated the tree enough to
5263 * move the record location, so we have to look for it again.
5264 */
5265 ocfs2_reinit_path(path, 1);
5266
5267 ret = ocfs2_find_path(inode, path, cpos);
5268 if (ret) {
5269 mlog_errno(ret);
5270 goto out;
5271 }
5272
5273 el = path_leaf_el(path);
5274 index = ocfs2_search_extent_list(el, cpos);
5275 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
5276 ocfs2_error(inode->i_sb,
5277 "Inode %llu: split at cpos %u lost record.",
5278 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5279 cpos);
5280 ret = -EROFS;
5281 goto out;
5282 }
5283
5284 /*
5285 * Double check our values here. If anything is fishy,
5286 * it's easier to catch it at the top level.
5287 */
5288 rec = &el->l_recs[index];
5289 rec_range = le32_to_cpu(rec->e_cpos) +
5290 ocfs2_rec_clusters(el, rec);
5291 if (rec_range != trunc_range) {
5292 ocfs2_error(inode->i_sb,
5293				    "Inode %llu: error after split at cpos %u "
5294 "trunc len %u, existing record is (%u,%u)",
5295 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5296 cpos, len, le32_to_cpu(rec->e_cpos),
5297 ocfs2_rec_clusters(el, rec));
5298 ret = -EROFS;
5299 goto out;
5300 }
5301
5302 ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc,
Joel Beckerdc0ce612008-08-20 16:48:35 -07005303 cpos, len, &et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005304 if (ret) {
5305 mlog_errno(ret);
5306 goto out;
5307 }
5308 }
5309
5310out:
5311 ocfs2_free_path(path);
Joel Beckerdc0ce612008-08-20 16:48:35 -07005312 ocfs2_put_extent_tree(&et);
Mark Fashehd0c7d702007-07-03 13:27:22 -07005313 return ret;
5314}
5315
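/* Returns nonzero when the truncate log is full and must be flushed
 * before another record can be appended. */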
Mark Fasheh063c4562007-07-03 13:34:11 -07005316int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
Mark Fashehccd979b2005-12-15 14:31:24 -08005317{
5318 struct buffer_head *tl_bh = osb->osb_tl_bh;
5319 struct ocfs2_dinode *di;
5320 struct ocfs2_truncate_log *tl;
5321
5322 di = (struct ocfs2_dinode *) tl_bh->b_data;
5323 tl = &di->id2.i_dealloc;
5324
5325 mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
5326 "slot %d, invalid truncate log parameters: used = "
5327 "%u, count = %u\n", osb->slot_num,
5328 le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
5329 return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
5330}
5331
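/* A new range starting at 'new_start' can be merged into the tail
 * record if it begins exactly where that record ends. */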
5332static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
5333 unsigned int new_start)
5334{
5335 unsigned int tail_index;
5336 unsigned int current_tail;
5337
5338 /* No records, nothing to coalesce */
5339 if (!le16_to_cpu(tl->tl_used))
5340 return 0;
5341
5342 tail_index = le16_to_cpu(tl->tl_used) - 1;
5343 current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
5344 current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);
5345
5346 return current_tail == new_start;
5347}
5348
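/*
 * Append a freed cluster range to this node's truncate log. The
 * caller must hold tl_inode->i_mutex and must have flushed the log
 * if ocfs2_truncate_log_needs_flush() said so - a full log returns
 * -ENOSPC here. Ranges adjacent to the tail record are coalesced.
 *
 * A rough sketch of the expected call pattern, with error handling
 * omitted (the recovery code later in this file follows it):
 *
 *	mutex_lock(&tl_inode->i_mutex);
 *	if (ocfs2_truncate_log_needs_flush(osb))
 *		__ocfs2_flush_truncate_log(osb);
 *	handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
 *	ocfs2_truncate_log_append(osb, handle, start_blk, clusters);
 *	ocfs2_commit_trans(osb, handle);
 *	mutex_unlock(&tl_inode->i_mutex);
 */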
Mark Fasheh063c4562007-07-03 13:34:11 -07005349int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5350 handle_t *handle,
5351 u64 start_blk,
5352 unsigned int num_clusters)
Mark Fashehccd979b2005-12-15 14:31:24 -08005353{
5354 int status, index;
5355 unsigned int start_cluster, tl_count;
5356 struct inode *tl_inode = osb->osb_tl_inode;
5357 struct buffer_head *tl_bh = osb->osb_tl_bh;
5358 struct ocfs2_dinode *di;
5359 struct ocfs2_truncate_log *tl;
5360
Mark Fashehb06970532006-03-03 10:24:33 -08005361 mlog_entry("start_blk = %llu, num_clusters = %u\n",
5362 (unsigned long long)start_blk, num_clusters);
Mark Fashehccd979b2005-12-15 14:31:24 -08005363
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005364 BUG_ON(mutex_trylock(&tl_inode->i_mutex));
Mark Fashehccd979b2005-12-15 14:31:24 -08005365
5366 start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
5367
5368 di = (struct ocfs2_dinode *) tl_bh->b_data;
5369 tl = &di->id2.i_dealloc;
5370 if (!OCFS2_IS_VALID_DINODE(di)) {
5371 OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
5372 status = -EIO;
5373 goto bail;
5374 }
5375
5376 tl_count = le16_to_cpu(tl->tl_count);
5377 mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
5378 tl_count == 0,
Mark Fashehb06970532006-03-03 10:24:33 -08005379			"Truncate record count on #%llu invalid: "
5380 "wanted %u, actual %u\n",
5381 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
Mark Fashehccd979b2005-12-15 14:31:24 -08005382 ocfs2_truncate_recs_per_inode(osb->sb),
5383 le16_to_cpu(tl->tl_count));
5384
5385 /* Caller should have known to flush before calling us. */
5386 index = le16_to_cpu(tl->tl_used);
5387 if (index >= tl_count) {
5388 status = -ENOSPC;
5389 mlog_errno(status);
5390 goto bail;
5391 }
5392
5393 status = ocfs2_journal_access(handle, tl_inode, tl_bh,
5394 OCFS2_JOURNAL_ACCESS_WRITE);
5395 if (status < 0) {
5396 mlog_errno(status);
5397 goto bail;
5398 }
5399
5400 mlog(0, "Log truncate of %u clusters starting at cluster %u to "
Mark Fashehb06970532006-03-03 10:24:33 -08005401 "%llu (index = %d)\n", num_clusters, start_cluster,
5402 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index);
Mark Fashehccd979b2005-12-15 14:31:24 -08005403
5404 if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
5405 /*
5406 * Move index back to the record we are coalescing with.
5407 * ocfs2_truncate_log_can_coalesce() guarantees nonzero
5408 */
5409 index--;
5410
5411 num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
5412 mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",
5413 index, le32_to_cpu(tl->tl_recs[index].t_start),
5414 num_clusters);
5415 } else {
5416 tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
5417 tl->tl_used = cpu_to_le16(index + 1);
5418 }
5419 tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
5420
5421 status = ocfs2_journal_dirty(handle, tl_bh);
5422 if (status < 0) {
5423 mlog_errno(status);
5424 goto bail;
5425 }
5426
5427bail:
5428 mlog_exit(status);
5429 return status;
5430}
5431
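/*
 * Walk the truncate log from its tail, handing each recorded cluster
 * range back to the global bitmap and shrinking tl_used as we go so
 * that a crash mid-flush leaves the log consistent. The handle is
 * extended for every record processed.
 */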
5432static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
Mark Fasheh1fabe142006-10-09 18:11:45 -07005433 handle_t *handle,
Mark Fashehccd979b2005-12-15 14:31:24 -08005434 struct inode *data_alloc_inode,
5435 struct buffer_head *data_alloc_bh)
5436{
5437 int status = 0;
5438 int i;
5439 unsigned int num_clusters;
5440 u64 start_blk;
5441 struct ocfs2_truncate_rec rec;
5442 struct ocfs2_dinode *di;
5443 struct ocfs2_truncate_log *tl;
5444 struct inode *tl_inode = osb->osb_tl_inode;
5445 struct buffer_head *tl_bh = osb->osb_tl_bh;
5446
5447 mlog_entry_void();
5448
5449 di = (struct ocfs2_dinode *) tl_bh->b_data;
5450 tl = &di->id2.i_dealloc;
5451 i = le16_to_cpu(tl->tl_used) - 1;
5452 while (i >= 0) {
5453 /* Caller has given us at least enough credits to
5454 * update the truncate log dinode */
5455 status = ocfs2_journal_access(handle, tl_inode, tl_bh,
5456 OCFS2_JOURNAL_ACCESS_WRITE);
5457 if (status < 0) {
5458 mlog_errno(status);
5459 goto bail;
5460 }
5461
5462 tl->tl_used = cpu_to_le16(i);
5463
5464 status = ocfs2_journal_dirty(handle, tl_bh);
5465 if (status < 0) {
5466 mlog_errno(status);
5467 goto bail;
5468 }
5469
5470 /* TODO: Perhaps we can calculate the bulk of the
5471 * credits up front rather than extending like
5472 * this. */
5473 status = ocfs2_extend_trans(handle,
5474 OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
5475 if (status < 0) {
5476 mlog_errno(status);
5477 goto bail;
5478 }
5479
5480 rec = tl->tl_recs[i];
5481 start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
5482 le32_to_cpu(rec.t_start));
5483 num_clusters = le32_to_cpu(rec.t_clusters);
5484
5485 /* if start_blk is not set, we ignore the record as
5486 * invalid. */
5487 if (start_blk) {
5488 mlog(0, "free record %d, start = %u, clusters = %u\n",
5489 i, le32_to_cpu(rec.t_start), num_clusters);
5490
5491 status = ocfs2_free_clusters(handle, data_alloc_inode,
5492 data_alloc_bh, start_blk,
5493 num_clusters);
5494 if (status < 0) {
5495 mlog_errno(status);
5496 goto bail;
5497 }
5498 }
5499 i--;
5500 }
5501
5502bail:
5503 mlog_exit(status);
5504 return status;
5505}
5506
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005507/* Expects you to already be holding tl_inode->i_mutex */
Mark Fasheh063c4562007-07-03 13:34:11 -07005508int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
Mark Fashehccd979b2005-12-15 14:31:24 -08005509{
5510 int status;
5511 unsigned int num_to_flush;
Mark Fasheh1fabe142006-10-09 18:11:45 -07005512 handle_t *handle;
Mark Fashehccd979b2005-12-15 14:31:24 -08005513 struct inode *tl_inode = osb->osb_tl_inode;
5514 struct inode *data_alloc_inode = NULL;
5515 struct buffer_head *tl_bh = osb->osb_tl_bh;
5516 struct buffer_head *data_alloc_bh = NULL;
5517 struct ocfs2_dinode *di;
5518 struct ocfs2_truncate_log *tl;
5519
5520 mlog_entry_void();
5521
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005522 BUG_ON(mutex_trylock(&tl_inode->i_mutex));
Mark Fashehccd979b2005-12-15 14:31:24 -08005523
5524 di = (struct ocfs2_dinode *) tl_bh->b_data;
5525 tl = &di->id2.i_dealloc;
5526 if (!OCFS2_IS_VALID_DINODE(di)) {
5527 OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
5528 status = -EIO;
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005529 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08005530 }
5531
5532 num_to_flush = le16_to_cpu(tl->tl_used);
Mark Fashehb06970532006-03-03 10:24:33 -08005533 mlog(0, "Flush %u records from truncate log #%llu\n",
5534 num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
Mark Fashehccd979b2005-12-15 14:31:24 -08005535 if (!num_to_flush) {
5536 status = 0;
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005537 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08005538 }
5539
5540 data_alloc_inode = ocfs2_get_system_file_inode(osb,
5541 GLOBAL_BITMAP_SYSTEM_INODE,
5542 OCFS2_INVALID_SLOT);
5543 if (!data_alloc_inode) {
5544 status = -EINVAL;
5545 mlog(ML_ERROR, "Could not get bitmap inode!\n");
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005546 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08005547 }
5548
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005549 mutex_lock(&data_alloc_inode->i_mutex);
5550
Mark Fashehe63aecb62007-10-18 15:30:42 -07005551 status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1);
Mark Fashehccd979b2005-12-15 14:31:24 -08005552 if (status < 0) {
5553 mlog_errno(status);
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005554 goto out_mutex;
Mark Fashehccd979b2005-12-15 14:31:24 -08005555 }
5556
Mark Fasheh65eff9c2006-10-09 17:26:22 -07005557 handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
Mark Fashehccd979b2005-12-15 14:31:24 -08005558 if (IS_ERR(handle)) {
5559 status = PTR_ERR(handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08005560 mlog_errno(status);
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005561 goto out_unlock;
Mark Fashehccd979b2005-12-15 14:31:24 -08005562 }
5563
5564 status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
5565 data_alloc_bh);
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005566 if (status < 0)
Mark Fashehccd979b2005-12-15 14:31:24 -08005567 mlog_errno(status);
Mark Fashehccd979b2005-12-15 14:31:24 -08005568
Mark Fasheh02dc1af2006-10-09 16:48:10 -07005569 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08005570
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005571out_unlock:
5572 brelse(data_alloc_bh);
Mark Fashehe63aecb62007-10-18 15:30:42 -07005573 ocfs2_inode_unlock(data_alloc_inode, 1);
Mark Fashehccd979b2005-12-15 14:31:24 -08005574
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005575out_mutex:
5576 mutex_unlock(&data_alloc_inode->i_mutex);
5577 iput(data_alloc_inode);
Mark Fashehccd979b2005-12-15 14:31:24 -08005578
Mark Fashehe08dc8b2006-10-05 15:58:48 -07005579out:
Mark Fashehccd979b2005-12-15 14:31:24 -08005580 mlog_exit(status);
5581 return status;
5582}
5583
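/* Convenience wrapper that takes tl_inode->i_mutex around the flush. */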
5584int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
5585{
5586 int status;
5587 struct inode *tl_inode = osb->osb_tl_inode;
5588
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005589 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005590 status = __ocfs2_flush_truncate_log(osb);
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005591 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005592
5593 return status;
5594}
5595
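/*
 * Workqueue entry point for the delayed truncate log flush queued by
 * ocfs2_schedule_truncate_log_flush() below.
 */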
David Howellsc4028952006-11-22 14:57:56 +00005596static void ocfs2_truncate_log_worker(struct work_struct *work)
Mark Fashehccd979b2005-12-15 14:31:24 -08005597{
5598 int status;
David Howellsc4028952006-11-22 14:57:56 +00005599 struct ocfs2_super *osb =
5600 container_of(work, struct ocfs2_super,
5601 osb_truncate_log_wq.work);
Mark Fashehccd979b2005-12-15 14:31:24 -08005602
5603 mlog_entry_void();
5604
5605 status = ocfs2_flush_truncate_log(osb);
5606 if (status < 0)
5607 mlog_errno(status);
Tao Ma4d0ddb22008-03-05 16:11:46 +08005608 else
5609 ocfs2_init_inode_steal_slot(osb);
Mark Fashehccd979b2005-12-15 14:31:24 -08005610
5611 mlog_exit(status);
5612}
5613
5614#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
5615void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
5616 int cancel)
5617{
5618 if (osb->osb_tl_inode) {
5619 /* We want to push off log flushes while truncates are
5620 * still running. */
5621 if (cancel)
5622 cancel_delayed_work(&osb->osb_truncate_log_wq);
5623
5624 queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
5625 OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
5626 }
5627}
5628
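/* Look up the truncate log system inode for 'slot_num' and read its
 * dinode block. On success the caller owns a reference to both. */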
5629static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
5630 int slot_num,
5631 struct inode **tl_inode,
5632 struct buffer_head **tl_bh)
5633{
5634 int status;
5635 struct inode *inode = NULL;
5636 struct buffer_head *bh = NULL;
5637
5638 inode = ocfs2_get_system_file_inode(osb,
5639 TRUNCATE_LOG_SYSTEM_INODE,
5640 slot_num);
5641 if (!inode) {
5642 status = -EINVAL;
5643		mlog(ML_ERROR, "Could not load truncate log inode!\n");
5644 goto bail;
5645 }
5646
5647 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
5648 OCFS2_BH_CACHED, inode);
5649 if (status < 0) {
5650 iput(inode);
5651 mlog_errno(status);
5652 goto bail;
5653 }
5654
5655 *tl_inode = inode;
5656 *tl_bh = bh;
5657bail:
5658 mlog_exit(status);
5659 return status;
5660}
5661
5662/* called during the 1st stage of node recovery. we stamp a clean
5663 * truncate log and pass back a copy for processing later. if the
5664 * truncate log does not require processing, *tl_copy is set to
5665 * NULL. */
5666int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
5667 int slot_num,
5668 struct ocfs2_dinode **tl_copy)
5669{
5670 int status;
5671 struct inode *tl_inode = NULL;
5672 struct buffer_head *tl_bh = NULL;
5673 struct ocfs2_dinode *di;
5674 struct ocfs2_truncate_log *tl;
5675
5676 *tl_copy = NULL;
5677
5678 mlog(0, "recover truncate log from slot %d\n", slot_num);
5679
5680 status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
5681 if (status < 0) {
5682 mlog_errno(status);
5683 goto bail;
5684 }
5685
5686 di = (struct ocfs2_dinode *) tl_bh->b_data;
5687 tl = &di->id2.i_dealloc;
5688 if (!OCFS2_IS_VALID_DINODE(di)) {
5689 OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di);
5690 status = -EIO;
5691 goto bail;
5692 }
5693
5694 if (le16_to_cpu(tl->tl_used)) {
5695 mlog(0, "We'll have %u logs to recover\n",
5696 le16_to_cpu(tl->tl_used));
5697
5698 *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
5699 if (!(*tl_copy)) {
5700 status = -ENOMEM;
5701 mlog_errno(status);
5702 goto bail;
5703 }
5704
5705 /* Assuming the write-out below goes well, this copy
5706 * will be passed back to recovery for processing. */
5707 memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size);
5708
5709 /* All we need to do to clear the truncate log is set
5710 * tl_used. */
5711 tl->tl_used = 0;
5712
5713 status = ocfs2_write_block(osb, tl_bh, tl_inode);
5714 if (status < 0) {
5715 mlog_errno(status);
5716 goto bail;
5717 }
5718 }
5719
5720bail:
5721 if (tl_inode)
5722 iput(tl_inode);
5723 if (tl_bh)
5724 brelse(tl_bh);
5725
5726 if (status < 0 && (*tl_copy)) {
5727 kfree(*tl_copy);
5728 *tl_copy = NULL;
5729 }
5730
5731 mlog_exit(status);
5732 return status;
5733}
5734
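/*
 * Second stage of truncate log recovery: replay the records saved in
 * tl_copy by appending them to our own truncate log, flushing it
 * whenever it fills up.
 */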
5735int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
5736 struct ocfs2_dinode *tl_copy)
5737{
5738 int status = 0;
5739 int i;
5740 unsigned int clusters, num_recs, start_cluster;
5741 u64 start_blk;
Mark Fasheh1fabe142006-10-09 18:11:45 -07005742 handle_t *handle;
Mark Fashehccd979b2005-12-15 14:31:24 -08005743 struct inode *tl_inode = osb->osb_tl_inode;
5744 struct ocfs2_truncate_log *tl;
5745
5746 mlog_entry_void();
5747
5748 if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
5749 mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
5750 return -EINVAL;
5751 }
5752
5753 tl = &tl_copy->id2.i_dealloc;
5754 num_recs = le16_to_cpu(tl->tl_used);
Mark Fashehb06970532006-03-03 10:24:33 -08005755 mlog(0, "cleanup %u records from %llu\n", num_recs,
Mark Fasheh1ca1a112007-04-27 16:01:25 -07005756 (unsigned long long)le64_to_cpu(tl_copy->i_blkno));
Mark Fashehccd979b2005-12-15 14:31:24 -08005757
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005758 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005759 for(i = 0; i < num_recs; i++) {
5760 if (ocfs2_truncate_log_needs_flush(osb)) {
5761 status = __ocfs2_flush_truncate_log(osb);
5762 if (status < 0) {
5763 mlog_errno(status);
5764 goto bail_up;
5765 }
5766 }
5767
Mark Fasheh65eff9c2006-10-09 17:26:22 -07005768 handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
Mark Fashehccd979b2005-12-15 14:31:24 -08005769 if (IS_ERR(handle)) {
5770 status = PTR_ERR(handle);
5771 mlog_errno(status);
5772 goto bail_up;
5773 }
5774
5775 clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
5776 start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
5777 start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);
5778
5779 status = ocfs2_truncate_log_append(osb, handle,
5780 start_blk, clusters);
Mark Fasheh02dc1af2006-10-09 16:48:10 -07005781 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08005782 if (status < 0) {
5783 mlog_errno(status);
5784 goto bail_up;
5785 }
5786 }
5787
5788bail_up:
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08005789 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08005790
5791 mlog_exit(status);
5792 return status;
5793}
5794
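/* Flush any pending truncate log work and drop the log inode; called
 * on unmount. */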
5795void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
5796{
5797 int status;
5798 struct inode *tl_inode = osb->osb_tl_inode;
5799
5800 mlog_entry_void();
5801
5802 if (tl_inode) {
5803 cancel_delayed_work(&osb->osb_truncate_log_wq);
5804 flush_workqueue(ocfs2_wq);
5805
5806 status = ocfs2_flush_truncate_log(osb);
5807 if (status < 0)
5808 mlog_errno(status);
5809
5810 brelse(osb->osb_tl_bh);
5811 iput(osb->osb_tl_inode);
5812 }
5813
5814 mlog_exit_void();
5815}
5816
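/* Load this node's truncate log at mount time and set up the delayed
 * flush work. */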
5817int ocfs2_truncate_log_init(struct ocfs2_super *osb)
5818{
5819 int status;
5820 struct inode *tl_inode = NULL;
5821 struct buffer_head *tl_bh = NULL;
5822
5823 mlog_entry_void();
5824
5825 status = ocfs2_get_truncate_log_info(osb,
5826 osb->slot_num,
5827 &tl_inode,
5828 &tl_bh);
5829 if (status < 0)
5830 mlog_errno(status);
5831
5832 /* ocfs2_truncate_log_shutdown keys on the existence of
5833 * osb->osb_tl_inode so we don't set any of the osb variables
5834 * until we're sure all is well. */
David Howellsc4028952006-11-22 14:57:56 +00005835 INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
5836 ocfs2_truncate_log_worker);
Mark Fashehccd979b2005-12-15 14:31:24 -08005837 osb->osb_tl_bh = tl_bh;
5838 osb->osb_tl_inode = tl_inode;
5839
5840 mlog_exit(status);
5841 return status;
5842}
5843
Mark Fasheh2b604352007-06-22 15:45:27 -07005844/*
5845 * Delayed de-allocation of suballocator blocks.
5846 *
5847 * Some sets of block de-allocations might involve multiple suballocator inodes.
5848 *
5849 * The locking for this can get extremely complicated, especially when
5850 * the suballocator inodes to delete from aren't known until deep
5851 * within an unrelated codepath.
5852 *
5853 * ocfs2_extent_block structures are a good example of this - an inode
5854 * btree could have been grown by any number of nodes, each allocating
5855 * out of their own suballoc inode.
5856 *
5857 * These structures allow the delay of block de-allocation until a
5858 * later time, when locking of multiple cluster inodes won't cause
5859 * deadlock.
5860 */
5861
5862/*
5863 * Describes a single block free from a suballocator
5864 */
5865struct ocfs2_cached_block_free {
5866 struct ocfs2_cached_block_free *free_next;
5867 u64 free_blk;
5868 unsigned int free_bit;
5869};
5870
5871struct ocfs2_per_slot_free_list {
5872 struct ocfs2_per_slot_free_list *f_next_suballocator;
5873 int f_inode_type;
5874 int f_slot;
5875 struct ocfs2_cached_block_free *f_first;
5876};
5877
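/*
 * Free one per-slot list of cached block frees: lock the suballocator
 * inode once, then release each bit within a single (extended)
 * transaction, freeing the list entries as we go.
 */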
5878static int ocfs2_free_cached_items(struct ocfs2_super *osb,
5879 int sysfile_type,
5880 int slot,
5881 struct ocfs2_cached_block_free *head)
5882{
5883 int ret;
5884 u64 bg_blkno;
5885 handle_t *handle;
5886 struct inode *inode;
5887 struct buffer_head *di_bh = NULL;
5888 struct ocfs2_cached_block_free *tmp;
5889
5890 inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot);
5891 if (!inode) {
5892 ret = -EINVAL;
5893 mlog_errno(ret);
5894 goto out;
5895 }
5896
5897 mutex_lock(&inode->i_mutex);
5898
Mark Fashehe63aecb62007-10-18 15:30:42 -07005899 ret = ocfs2_inode_lock(inode, &di_bh, 1);
Mark Fasheh2b604352007-06-22 15:45:27 -07005900 if (ret) {
5901 mlog_errno(ret);
5902 goto out_mutex;
5903 }
5904
5905 handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
5906 if (IS_ERR(handle)) {
5907 ret = PTR_ERR(handle);
5908 mlog_errno(ret);
5909 goto out_unlock;
5910 }
5911
5912 while (head) {
5913 bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
5914 head->free_bit);
5915 mlog(0, "Free bit: (bit %u, blkno %llu)\n",
5916 head->free_bit, (unsigned long long)head->free_blk);
5917
5918 ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
5919 head->free_bit, bg_blkno, 1);
5920 if (ret) {
5921 mlog_errno(ret);
5922 goto out_journal;
5923 }
5924
5925 ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
5926 if (ret) {
5927 mlog_errno(ret);
5928 goto out_journal;
5929 }
5930
5931 tmp = head;
5932 head = head->free_next;
5933 kfree(tmp);
5934 }
5935
5936out_journal:
5937 ocfs2_commit_trans(osb, handle);
5938
5939out_unlock:
Mark Fashehe63aecb62007-10-18 15:30:42 -07005940 ocfs2_inode_unlock(inode, 1);
Mark Fasheh2b604352007-06-22 15:45:27 -07005941 brelse(di_bh);
5942out_mutex:
5943 mutex_unlock(&inode->i_mutex);
5944 iput(inode);
5945out:
5946 while(head) {
5947 /* Premature exit may have left some dangling items. */
5948 tmp = head;
5949 head = head->free_next;
5950 kfree(tmp);
5951 }
5952
5953 return ret;
5954}
5955
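/*
 * Process everything queued on a dealloc context. Each per-slot list
 * is handed to ocfs2_free_cached_items(); the first error seen is
 * returned, but we keep going so that no list entries are leaked.
 */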
5956int ocfs2_run_deallocs(struct ocfs2_super *osb,
5957 struct ocfs2_cached_dealloc_ctxt *ctxt)
5958{
5959 int ret = 0, ret2;
5960 struct ocfs2_per_slot_free_list *fl;
5961
5962 if (!ctxt)
5963 return 0;
5964
5965 while (ctxt->c_first_suballocator) {
5966 fl = ctxt->c_first_suballocator;
5967
5968 if (fl->f_first) {
5969 mlog(0, "Free items: (type %u, slot %d)\n",
5970 fl->f_inode_type, fl->f_slot);
5971 ret2 = ocfs2_free_cached_items(osb, fl->f_inode_type,
5972 fl->f_slot, fl->f_first);
5973 if (ret2)
5974 mlog_errno(ret2);
5975 if (!ret)
5976 ret = ret2;
5977 }
5978
5979 ctxt->c_first_suballocator = fl->f_next_suballocator;
5980 kfree(fl);
5981 }
5982
5983 return ret;
5984}
5985
5986static struct ocfs2_per_slot_free_list *
5987ocfs2_find_per_slot_free_list(int type,
5988 int slot,
5989 struct ocfs2_cached_dealloc_ctxt *ctxt)
5990{
5991 struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
5992
5993 while (fl) {
5994 if (fl->f_inode_type == type && fl->f_slot == slot)
5995 return fl;
5996
5997 fl = fl->f_next_suballocator;
5998 }
5999
6000 fl = kmalloc(sizeof(*fl), GFP_NOFS);
6001 if (fl) {
6002 fl->f_inode_type = type;
6003 fl->f_slot = slot;
6004 fl->f_first = NULL;
6005 fl->f_next_suballocator = ctxt->c_first_suballocator;
6006
6007 ctxt->c_first_suballocator = fl;
6008 }
6009 return fl;
6010}
6011
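/* Queue a single (blkno, bit) suballocator free on the per-slot list
 * for the given inode type, allocating the list if necessary. */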
6012static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
6013 int type, int slot, u64 blkno,
6014 unsigned int bit)
6015{
6016 int ret;
6017 struct ocfs2_per_slot_free_list *fl;
6018 struct ocfs2_cached_block_free *item;
6019
6020 fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
6021 if (fl == NULL) {
6022 ret = -ENOMEM;
6023 mlog_errno(ret);
6024 goto out;
6025 }
6026
6027 item = kmalloc(sizeof(*item), GFP_NOFS);
6028 if (item == NULL) {
6029 ret = -ENOMEM;
6030 mlog_errno(ret);
6031 goto out;
6032 }
6033
6034 mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
6035 type, slot, bit, (unsigned long long)blkno);
6036
6037 item->free_blk = blkno;
6038 item->free_bit = bit;
6039 item->free_next = fl->f_first;
6040
6041 fl->f_first = item;
6042
6043 ret = 0;
6044out:
6045 return ret;
6046}
6047
Mark Fasheh59a5e412007-06-22 15:52:36 -07006048static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
6049 struct ocfs2_extent_block *eb)
6050{
6051 return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
6052 le16_to_cpu(eb->h_suballoc_slot),
6053 le64_to_cpu(eb->h_blkno),
6054 le16_to_cpu(eb->h_suballoc_bit));
6055}
6056
Mark Fashehccd979b2005-12-15 14:31:24 -08006057/* This function will figure out whether the currently last extent
6058 * block will be deleted, and if it will, what the new last extent
6059 * block will be so we can update its h_next_leaf_blk field, as well
6060 * as the dinode's i_last_eb_blk */
Mark Fashehdcd05382007-01-16 11:32:23 -08006061static int ocfs2_find_new_last_ext_blk(struct inode *inode,
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006062 unsigned int clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08006063 struct ocfs2_path *path,
Mark Fashehccd979b2005-12-15 14:31:24 -08006064 struct buffer_head **new_last_eb)
6065{
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006066 int next_free, ret = 0;
Mark Fashehdcd05382007-01-16 11:32:23 -08006067 u32 cpos;
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006068 struct ocfs2_extent_rec *rec;
Mark Fashehccd979b2005-12-15 14:31:24 -08006069 struct ocfs2_extent_block *eb;
6070 struct ocfs2_extent_list *el;
6071 struct buffer_head *bh = NULL;
6072
6073 *new_last_eb = NULL;
6074
Mark Fashehccd979b2005-12-15 14:31:24 -08006075 /* we have no tree, so of course, no last_eb. */
Mark Fashehdcd05382007-01-16 11:32:23 -08006076 if (!path->p_tree_depth)
6077 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08006078
6079 /* trunc to zero special case - this makes tree_depth = 0
6080 * regardless of what it is. */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006081 if (OCFS2_I(inode)->ip_clusters == clusters_to_del)
Mark Fashehdcd05382007-01-16 11:32:23 -08006082 goto out;
Mark Fashehccd979b2005-12-15 14:31:24 -08006083
Mark Fashehdcd05382007-01-16 11:32:23 -08006084 el = path_leaf_el(path);
Mark Fashehccd979b2005-12-15 14:31:24 -08006085 BUG_ON(!el->l_next_free_rec);
6086
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006087 /*
6088 * Make sure that this extent list will actually be empty
6089 * after we clear away the data. We can shortcut out if
6090 * there's more than one non-empty extent in the
6091 * list. Otherwise, a check of the remaining extent is
6092 * necessary.
6093 */
6094 next_free = le16_to_cpu(el->l_next_free_rec);
6095 rec = NULL;
Mark Fashehdcd05382007-01-16 11:32:23 -08006096 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006097 if (next_free > 2)
Mark Fashehdcd05382007-01-16 11:32:23 -08006098 goto out;
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006099
6100 /* We may have a valid extent in index 1, check it. */
6101 if (next_free == 2)
6102 rec = &el->l_recs[1];
6103
6104 /*
6105 * Fall through - no more nonempty extents, so we want
6106 * to delete this leaf.
6107 */
6108 } else {
6109 if (next_free > 1)
6110 goto out;
6111
6112 rec = &el->l_recs[0];
6113 }
6114
6115 if (rec) {
6116 /*
6117		 * Check if we'll only be trimming off the end of this
6118 * cluster.
6119 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006120 if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006121 goto out;
6122 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006123
Mark Fashehdcd05382007-01-16 11:32:23 -08006124 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
6125 if (ret) {
6126 mlog_errno(ret);
6127 goto out;
6128 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006129
Mark Fashehdcd05382007-01-16 11:32:23 -08006130 ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh);
6131 if (ret) {
6132 mlog_errno(ret);
6133 goto out;
6134 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006135
Mark Fashehdcd05382007-01-16 11:32:23 -08006136 eb = (struct ocfs2_extent_block *) bh->b_data;
6137 el = &eb->h_list;
6138 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
6139 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
6140 ret = -EROFS;
6141 goto out;
6142 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006143
6144 *new_last_eb = bh;
6145 get_bh(*new_last_eb);
Mark Fashehdcd05382007-01-16 11:32:23 -08006146 mlog(0, "returning block %llu, (cpos: %u)\n",
6147 (unsigned long long)le64_to_cpu(eb->h_blkno), cpos);
6148out:
6149 brelse(bh);
Mark Fashehccd979b2005-12-15 14:31:24 -08006150
Mark Fashehdcd05382007-01-16 11:32:23 -08006151 return ret;
Mark Fashehccd979b2005-12-15 14:31:24 -08006152}
6153
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006154/*
6155 * Trim some clusters off the rightmost edge of a tree. Only called
6156 * during truncate.
6157 *
6158 * The caller needs to:
6159 * - start journaling of each path component.
6160 * - compute and fully set up any new last ext block
6161 */
6162static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
6163 handle_t *handle, struct ocfs2_truncate_context *tc,
6164 u32 clusters_to_del, u64 *delete_start)
6165{
6166 int ret, i, index = path->p_tree_depth;
6167 u32 new_edge = 0;
6168 u64 deleted_eb = 0;
6169 struct buffer_head *bh;
6170 struct ocfs2_extent_list *el;
6171 struct ocfs2_extent_rec *rec;
6172
6173 *delete_start = 0;
6174
6175 while (index >= 0) {
6176 bh = path->p_node[index].bh;
6177 el = path->p_node[index].el;
6178
6179 mlog(0, "traveling tree (index = %d, block = %llu)\n",
6180 index, (unsigned long long)bh->b_blocknr);
6181
6182 BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
6183
6184 if (index !=
6185 (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) {
6186 ocfs2_error(inode->i_sb,
6187 "Inode %lu has invalid ext. block %llu",
6188 inode->i_ino,
6189 (unsigned long long)bh->b_blocknr);
6190 ret = -EROFS;
6191 goto out;
6192 }
6193
6194find_tail_record:
6195 i = le16_to_cpu(el->l_next_free_rec) - 1;
6196 rec = &el->l_recs[i];
6197
6198 mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
6199 "next = %u\n", i, le32_to_cpu(rec->e_cpos),
Mark Fashehe48edee2007-03-07 16:46:57 -08006200 ocfs2_rec_clusters(el, rec),
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006201 (unsigned long long)le64_to_cpu(rec->e_blkno),
6202 le16_to_cpu(el->l_next_free_rec));
6203
Mark Fashehe48edee2007-03-07 16:46:57 -08006204 BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006205
6206 if (le16_to_cpu(el->l_tree_depth) == 0) {
6207 /*
6208 * If the leaf block contains a single empty
6209 * extent and no records, we can just remove
6210 * the block.
6211 */
6212 if (i == 0 && ocfs2_is_empty_extent(rec)) {
6213 memset(rec, 0,
6214 sizeof(struct ocfs2_extent_rec));
6215 el->l_next_free_rec = cpu_to_le16(0);
6216
6217 goto delete;
6218 }
6219
6220 /*
6221 * Remove any empty extents by shifting things
6222 * left. That should make life much easier on
6223 * the code below. This condition is rare
6224 * enough that we shouldn't see a performance
6225 * hit.
6226 */
6227 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
6228 le16_add_cpu(&el->l_next_free_rec, -1);
6229
6230 for(i = 0;
6231 i < le16_to_cpu(el->l_next_free_rec); i++)
6232 el->l_recs[i] = el->l_recs[i + 1];
6233
6234 memset(&el->l_recs[i], 0,
6235 sizeof(struct ocfs2_extent_rec));
6236
6237 /*
6238 * We've modified our extent list. The
6239 * simplest way to handle this change
6240			 * is to begin the search from the
6241 * start again.
6242 */
6243 goto find_tail_record;
6244 }
6245
Mark Fashehe48edee2007-03-07 16:46:57 -08006246 le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006247
6248 /*
6249 * We'll use "new_edge" on our way back up the
6250 * tree to know what our rightmost cpos is.
6251 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006252 new_edge = le16_to_cpu(rec->e_leaf_clusters);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006253 new_edge += le32_to_cpu(rec->e_cpos);
6254
6255 /*
6256 * The caller will use this to delete data blocks.
6257 */
6258 *delete_start = le64_to_cpu(rec->e_blkno)
6259 + ocfs2_clusters_to_blocks(inode->i_sb,
Mark Fashehe48edee2007-03-07 16:46:57 -08006260 le16_to_cpu(rec->e_leaf_clusters));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006261
6262 /*
6263 * If it's now empty, remove this record.
6264 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006265 if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006266 memset(rec, 0,
6267 sizeof(struct ocfs2_extent_rec));
6268 le16_add_cpu(&el->l_next_free_rec, -1);
6269 }
6270 } else {
6271 if (le64_to_cpu(rec->e_blkno) == deleted_eb) {
6272 memset(rec, 0,
6273 sizeof(struct ocfs2_extent_rec));
6274 le16_add_cpu(&el->l_next_free_rec, -1);
6275
6276 goto delete;
6277 }
6278
6279 /* Can this actually happen? */
6280 if (le16_to_cpu(el->l_next_free_rec) == 0)
6281 goto delete;
6282
6283 /*
6284 * We never actually deleted any clusters
6285 * because our leaf was empty. There's no
6286 * reason to adjust the rightmost edge then.
6287 */
6288 if (new_edge == 0)
6289 goto delete;
6290
Mark Fashehe48edee2007-03-07 16:46:57 -08006291 rec->e_int_clusters = cpu_to_le32(new_edge);
6292 le32_add_cpu(&rec->e_int_clusters,
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006293 -le32_to_cpu(rec->e_cpos));
6294
6295 /*
6296 * A deleted child record should have been
6297 * caught above.
6298 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006299 BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006300 }
6301
6302delete:
6303 ret = ocfs2_journal_dirty(handle, bh);
6304 if (ret) {
6305 mlog_errno(ret);
6306 goto out;
6307 }
6308
6309 mlog(0, "extent list container %llu, after: record %d: "
6310 "(%u, %u, %llu), next = %u.\n",
6311 (unsigned long long)bh->b_blocknr, i,
Mark Fashehe48edee2007-03-07 16:46:57 -08006312 le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006313 (unsigned long long)le64_to_cpu(rec->e_blkno),
6314 le16_to_cpu(el->l_next_free_rec));
6315
6316 /*
6317 * We must be careful to only attempt delete of an
6318 * extent block (and not the root inode block).
6319 */
6320 if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) {
6321 struct ocfs2_extent_block *eb =
6322 (struct ocfs2_extent_block *)bh->b_data;
6323
6324 /*
6325 * Save this for use when processing the
6326 * parent block.
6327 */
6328 deleted_eb = le64_to_cpu(eb->h_blkno);
6329
6330 mlog(0, "deleting this extent block.\n");
6331
6332 ocfs2_remove_from_cache(inode, bh);
6333
Mark Fashehe48edee2007-03-07 16:46:57 -08006334 BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006335 BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
6336 BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));
6337
Mark Fasheh59a5e412007-06-22 15:52:36 -07006338 ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb);
6339 /* An error here is not fatal. */
6340 if (ret < 0)
6341 mlog_errno(ret);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006342 } else {
6343 deleted_eb = 0;
6344 }
6345
6346 index--;
6347 }
6348
6349 ret = 0;
6350out:
6351 return ret;
6352}
6353
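/*
 * Remove clusters_to_del clusters from the rightmost edge of the
 * inode's tree: journal the whole path, update i_clusters, trim the
 * tree, fix up the last extent block pointers and record the freed
 * clusters in the truncate log.
 */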
Mark Fashehccd979b2005-12-15 14:31:24 -08006354static int ocfs2_do_truncate(struct ocfs2_super *osb,
6355 unsigned int clusters_to_del,
6356 struct inode *inode,
6357 struct buffer_head *fe_bh,
Mark Fasheh1fabe142006-10-09 18:11:45 -07006358 handle_t *handle,
Mark Fashehdcd05382007-01-16 11:32:23 -08006359 struct ocfs2_truncate_context *tc,
6360 struct ocfs2_path *path)
Mark Fashehccd979b2005-12-15 14:31:24 -08006361{
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006362 int status;
Mark Fashehccd979b2005-12-15 14:31:24 -08006363 struct ocfs2_dinode *fe;
Mark Fashehccd979b2005-12-15 14:31:24 -08006364 struct ocfs2_extent_block *last_eb = NULL;
6365 struct ocfs2_extent_list *el;
Mark Fashehccd979b2005-12-15 14:31:24 -08006366 struct buffer_head *last_eb_bh = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08006367 u64 delete_blk = 0;
6368
6369 fe = (struct ocfs2_dinode *) fe_bh->b_data;
6370
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006371 status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08006372 path, &last_eb_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -08006373 if (status < 0) {
6374 mlog_errno(status);
6375 goto bail;
6376 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006377
Mark Fashehdcd05382007-01-16 11:32:23 -08006378 /*
6379 * Each component will be touched, so we might as well journal
6380 * here to avoid having to handle errors later.
6381 */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006382 status = ocfs2_journal_access_path(inode, handle, path);
6383 if (status < 0) {
6384 mlog_errno(status);
6385 goto bail;
Mark Fashehdcd05382007-01-16 11:32:23 -08006386 }
6387
6388 if (last_eb_bh) {
6389 status = ocfs2_journal_access(handle, inode, last_eb_bh,
6390 OCFS2_JOURNAL_ACCESS_WRITE);
6391 if (status < 0) {
6392 mlog_errno(status);
6393 goto bail;
6394 }
6395
6396 last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
6397 }
6398
6399 el = &(fe->id2.i_list);
6400
6401 /*
6402 * Lower levels depend on this never happening, but it's best
6403 * to check it up here before changing the tree.
6404 */
Mark Fashehe48edee2007-03-07 16:46:57 -08006405 if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) {
Mark Fashehdcd05382007-01-16 11:32:23 -08006406 ocfs2_error(inode->i_sb,
6407 "Inode %lu has an empty extent record, depth %u\n",
6408 inode->i_ino, le16_to_cpu(el->l_tree_depth));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006409 status = -EROFS;
Mark Fashehccd979b2005-12-15 14:31:24 -08006410 goto bail;
6411 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006412
6413 spin_lock(&OCFS2_I(inode)->ip_lock);
6414 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
6415 clusters_to_del;
6416 spin_unlock(&OCFS2_I(inode)->ip_lock);
6417 le32_add_cpu(&fe->i_clusters, -clusters_to_del);
Mark Fashehe535e2e2007-08-31 10:23:41 -07006418 inode->i_blocks = ocfs2_inode_sector_count(inode);
Mark Fashehccd979b2005-12-15 14:31:24 -08006419
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006420 status = ocfs2_trim_tree(inode, path, handle, tc,
6421 clusters_to_del, &delete_blk);
6422 if (status) {
6423 mlog_errno(status);
6424 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08006425 }
6426
Mark Fashehdcd05382007-01-16 11:32:23 -08006427 if (le32_to_cpu(fe->i_clusters) == 0) {
Mark Fashehccd979b2005-12-15 14:31:24 -08006428 /* trunc to zero is a special case. */
6429 el->l_tree_depth = 0;
6430 fe->i_last_eb_blk = 0;
6431 } else if (last_eb)
6432 fe->i_last_eb_blk = last_eb->h_blkno;
6433
6434 status = ocfs2_journal_dirty(handle, fe_bh);
6435 if (status < 0) {
6436 mlog_errno(status);
6437 goto bail;
6438 }
6439
6440 if (last_eb) {
6441 /* If there will be a new last extent block, then by
6442 * definition, there cannot be any leaves to the right of
6443		 * it. */
Mark Fashehccd979b2005-12-15 14:31:24 -08006444 last_eb->h_next_leaf_blk = 0;
6445 status = ocfs2_journal_dirty(handle, last_eb_bh);
6446 if (status < 0) {
6447 mlog_errno(status);
6448 goto bail;
6449 }
6450 }
6451
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006452 if (delete_blk) {
6453 status = ocfs2_truncate_log_append(osb, handle, delete_blk,
6454 clusters_to_del);
Mark Fashehccd979b2005-12-15 14:31:24 -08006455 if (status < 0) {
6456 mlog_errno(status);
6457 goto bail;
6458 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006459 }
6460 status = 0;
6461bail:
Mark Fashehdcd05382007-01-16 11:32:23 -08006462
Mark Fashehccd979b2005-12-15 14:31:24 -08006463 mlog_exit(status);
6464 return status;
6465}
6466
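/* Per-buffer callbacks for walk_page_buffers() in
 * ocfs2_map_and_dirty_page(): both mark the buffer uptodate and
 * dirty; the ordered variant also files it with the journal's
 * ordered data list. */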
Mark Fasheh60b11392007-02-16 11:46:50 -08006467static int ocfs2_writeback_zero_func(handle_t *handle, struct buffer_head *bh)
6468{
6469 set_buffer_uptodate(bh);
6470 mark_buffer_dirty(bh);
6471 return 0;
6472}
6473
6474static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh)
6475{
6476 set_buffer_uptodate(bh);
6477 mark_buffer_dirty(bh);
6478 return ocfs2_journal_dirty_data(handle, bh);
6479}
6480
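/* Map buffers for the given range of the page, optionally zero it,
 * and dirty the buffers (through the journal in ordered data mode). */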
Mark Fasheh1d410a62007-09-07 14:20:45 -07006481static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
6482 unsigned int from, unsigned int to,
6483 struct page *page, int zero, u64 *phys)
6484{
6485 int ret, partial = 0;
6486
6487 ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
6488 if (ret)
6489 mlog_errno(ret);
6490
6491 if (zero)
Christoph Lametereebd2aa2008-02-04 22:28:29 -08006492 zero_user_segment(page, from, to);
Mark Fasheh1d410a62007-09-07 14:20:45 -07006493
6494 /*
6495 * Need to set the buffers we zero'd into uptodate
6496 * here if they aren't - ocfs2_map_page_blocks()
6497 * might've skipped some
6498 */
6499 if (ocfs2_should_order_data(inode)) {
6500 ret = walk_page_buffers(handle,
6501 page_buffers(page),
6502 from, to, &partial,
6503 ocfs2_ordered_zero_func);
6504 if (ret < 0)
6505 mlog_errno(ret);
6506 } else {
6507 ret = walk_page_buffers(handle, page_buffers(page),
6508 from, to, &partial,
6509 ocfs2_writeback_zero_func);
6510 if (ret < 0)
6511 mlog_errno(ret);
6512 }
6513
6514 if (!partial)
6515 SetPageUptodate(page);
6516
6517 flush_dcache_page(page);
6518}
6519
Mark Fasheh35edec12007-07-06 14:41:18 -07006520static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
6521 loff_t end, struct page **pages,
6522 int numpages, u64 phys, handle_t *handle)
Mark Fasheh60b11392007-02-16 11:46:50 -08006523{
Mark Fasheh1d410a62007-09-07 14:20:45 -07006524 int i;
Mark Fasheh60b11392007-02-16 11:46:50 -08006525 struct page *page;
6526 unsigned int from, to = PAGE_CACHE_SIZE;
6527 struct super_block *sb = inode->i_sb;
6528
6529 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
6530
6531 if (numpages == 0)
6532 goto out;
6533
Mark Fasheh35edec12007-07-06 14:41:18 -07006534 to = PAGE_CACHE_SIZE;
Mark Fasheh60b11392007-02-16 11:46:50 -08006535 for(i = 0; i < numpages; i++) {
6536 page = pages[i];
6537
Mark Fasheh35edec12007-07-06 14:41:18 -07006538 from = start & (PAGE_CACHE_SIZE - 1);
6539 if ((end >> PAGE_CACHE_SHIFT) == page->index)
6540 to = end & (PAGE_CACHE_SIZE - 1);
6541
Mark Fasheh60b11392007-02-16 11:46:50 -08006542 BUG_ON(from > PAGE_CACHE_SIZE);
6543 BUG_ON(to > PAGE_CACHE_SIZE);
6544
Mark Fasheh1d410a62007-09-07 14:20:45 -07006545 ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
6546 &phys);
Mark Fasheh60b11392007-02-16 11:46:50 -08006547
Mark Fasheh35edec12007-07-06 14:41:18 -07006548 start = (page->index + 1) << PAGE_CACHE_SHIFT;
Mark Fasheh60b11392007-02-16 11:46:50 -08006549 }
6550out:
Mark Fasheh1d410a62007-09-07 14:20:45 -07006551 if (pages)
6552 ocfs2_unlock_and_free_pages(pages, numpages);
Mark Fasheh60b11392007-02-16 11:46:50 -08006553}
6554
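/* Grab and lock every page cache page covering [start, end); both
 * offsets must fall within the same cluster. */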
Mark Fasheh35edec12007-07-06 14:41:18 -07006555static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
Mark Fasheh1d410a62007-09-07 14:20:45 -07006556 struct page **pages, int *num)
Mark Fasheh60b11392007-02-16 11:46:50 -08006557{
Mark Fasheh1d410a62007-09-07 14:20:45 -07006558 int numpages, ret = 0;
Mark Fasheh60b11392007-02-16 11:46:50 -08006559 struct super_block *sb = inode->i_sb;
6560 struct address_space *mapping = inode->i_mapping;
6561 unsigned long index;
Mark Fasheh35edec12007-07-06 14:41:18 -07006562 loff_t last_page_bytes;
Mark Fasheh60b11392007-02-16 11:46:50 -08006563
Mark Fasheh35edec12007-07-06 14:41:18 -07006564 BUG_ON(start > end);
Mark Fasheh60b11392007-02-16 11:46:50 -08006565
Mark Fasheh35edec12007-07-06 14:41:18 -07006566 BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
6567 (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
6568
Mark Fasheh1d410a62007-09-07 14:20:45 -07006569 numpages = 0;
Mark Fasheh35edec12007-07-06 14:41:18 -07006570 last_page_bytes = PAGE_ALIGN(end);
6571 index = start >> PAGE_CACHE_SHIFT;
Mark Fasheh60b11392007-02-16 11:46:50 -08006572 do {
6573 pages[numpages] = grab_cache_page(mapping, index);
6574 if (!pages[numpages]) {
6575 ret = -ENOMEM;
6576 mlog_errno(ret);
6577 goto out;
6578 }
6579
6580 numpages++;
6581 index++;
Mark Fasheh35edec12007-07-06 14:41:18 -07006582 } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT));
Mark Fasheh60b11392007-02-16 11:46:50 -08006583
6584out:
6585 if (ret != 0) {
Mark Fasheh1d410a62007-09-07 14:20:45 -07006586 if (pages)
6587 ocfs2_unlock_and_free_pages(pages, numpages);
Mark Fasheh60b11392007-02-16 11:46:50 -08006588 numpages = 0;
6589 }
6590
6591 *num = numpages;
6592
6593 return ret;
6594}
6595
6596/*
6597 * Zero the area past i_size but still within an allocated
6598 * cluster. This avoids exposing nonzero data on subsequent file
6599 * extends.
6600 *
6601 * We need to call this before i_size is updated on the inode because
6602 * otherwise block_write_full_page() will skip writeout of pages past
6603 * i_size. The new_i_size parameter is passed for this reason.
6604 * i_size. The range_start and range_end parameters are passed for this reason.
Mark Fasheh35edec12007-07-06 14:41:18 -07006605int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
6606 u64 range_start, u64 range_end)
Mark Fasheh60b11392007-02-16 11:46:50 -08006607{
Mark Fasheh1d410a62007-09-07 14:20:45 -07006608 int ret = 0, numpages;
Mark Fasheh60b11392007-02-16 11:46:50 -08006609 struct page **pages = NULL;
6610 u64 phys;
Mark Fasheh1d410a62007-09-07 14:20:45 -07006611 unsigned int ext_flags;
6612 struct super_block *sb = inode->i_sb;
Mark Fasheh60b11392007-02-16 11:46:50 -08006613
6614 /*
6615 * File systems which don't support sparse files zero on every
6616 * extend.
6617 */
Mark Fasheh1d410a62007-09-07 14:20:45 -07006618 if (!ocfs2_sparse_alloc(OCFS2_SB(sb)))
Mark Fasheh60b11392007-02-16 11:46:50 -08006619 return 0;
6620
Mark Fasheh1d410a62007-09-07 14:20:45 -07006621 pages = kcalloc(ocfs2_pages_per_cluster(sb),
Mark Fasheh60b11392007-02-16 11:46:50 -08006622 sizeof(struct page *), GFP_NOFS);
6623 if (pages == NULL) {
6624 ret = -ENOMEM;
6625 mlog_errno(ret);
6626 goto out;
6627 }
6628
Mark Fasheh1d410a62007-09-07 14:20:45 -07006629 if (range_start == range_end)
6630 goto out;
6631
6632 ret = ocfs2_extent_map_get_blocks(inode,
6633 range_start >> sb->s_blocksize_bits,
6634 &phys, NULL, &ext_flags);
Mark Fasheh60b11392007-02-16 11:46:50 -08006635 if (ret) {
6636 mlog_errno(ret);
6637 goto out;
6638 }
6639
Mark Fasheh1d410a62007-09-07 14:20:45 -07006640 /*
6641 * Tail is a hole, or is marked unwritten. In either case, we
6642 * can count on read and write to return/push zeros.
6643 */
6644 if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
Mark Fasheh60b11392007-02-16 11:46:50 -08006645 goto out;
6646
Mark Fasheh1d410a62007-09-07 14:20:45 -07006647 ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
6648 &numpages);
6649 if (ret) {
6650 mlog_errno(ret);
6651 goto out;
6652 }
6653
Mark Fasheh35edec12007-07-06 14:41:18 -07006654 ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
6655 numpages, phys, handle);
Mark Fasheh60b11392007-02-16 11:46:50 -08006656
6657 /*
6658 * Initiate writeout of the pages we zeroed here. We don't
6659 * wait on them - the truncate_inode_pages() call later will
6660 * do that for us.
6661 */
Mark Fasheh35edec12007-07-06 14:41:18 -07006662 ret = do_sync_mapping_range(inode->i_mapping, range_start,
6663 range_end - 1, SYNC_FILE_RANGE_WRITE);
Mark Fasheh60b11392007-02-16 11:46:50 -08006664 if (ret)
6665 mlog_errno(ret);
6666
6667out:
6668 if (pages)
6669 kfree(pages);
6670
6671 return ret;
6672}
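
/*
 * Illustrative sketch only (not a caller defined in this file): a
 * truncate path shrinking the file to new_i_size would zero the tail
 * of the last remaining cluster *before* publishing the new size,
 * roughly:
 *
 *	ret = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
 *			ocfs2_align_bytes_to_clusters(sb, new_i_size));
 *	if (!ret)
 *		i_size_write(inode, new_i_size);
 *
 * so that block_write_full_page() still writes out the zeroed pages.
 */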
6673
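/*
 * Zero the id2 union of a dinode. If the inode carries inline
 * xattrs, the xattr area at the tail of the block is left intact.
 */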
Tiger Yangfdd77702008-08-18 17:08:55 +08006674static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode,
6675 struct ocfs2_dinode *di)
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006676{
6677 unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits;
Tiger Yangfdd77702008-08-18 17:08:55 +08006678 unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006679
Tiger Yangfdd77702008-08-18 17:08:55 +08006680 if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL)
6681 memset(&di->id2, 0, blocksize -
6682 offsetof(struct ocfs2_dinode, id2) -
6683 xattrsize);
6684 else
6685 memset(&di->id2, 0, blocksize -
6686 offsetof(struct ocfs2_dinode, id2));
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006687}
6688
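/*
 * Reset the dinode's id2 area and initialize an empty root extent
 * list there, sized to leave room for any inline xattrs.
 */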
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006689void ocfs2_dinode_new_extent_list(struct inode *inode,
6690 struct ocfs2_dinode *di)
6691{
Tiger Yangfdd77702008-08-18 17:08:55 +08006692 ocfs2_zero_dinode_id2_with_xattr(inode, di);
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006693 di->id2.i_list.l_tree_depth = 0;
6694 di->id2.i_list.l_next_free_rec = 0;
Tiger Yangfdd77702008-08-18 17:08:55 +08006695 di->id2.i_list.l_count = cpu_to_le16(
6696 ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di));
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006697}
6698
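/*
 * Mark the inode as inline-data, both in memory and on disk, and
 * reinitialize id2.i_data with the maximum inline capacity,
 * accounting for any inline xattrs.
 */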
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006699void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
6700{
6701 struct ocfs2_inode_info *oi = OCFS2_I(inode);
6702 struct ocfs2_inline_data *idata = &di->id2.i_data;
6703
6704 spin_lock(&oi->ip_lock);
6705 oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
6706 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
6707 spin_unlock(&oi->ip_lock);
6708
6709 /*
6710 * We clear the entire i_data structure here so that all
6711 * fields can be properly initialized.
6712 */
Tiger Yangfdd77702008-08-18 17:08:55 +08006713 ocfs2_zero_dinode_id2_with_xattr(inode, di);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006714
Tiger Yangfdd77702008-08-18 17:08:55 +08006715 idata->id_count = cpu_to_le16(
6716 ocfs2_max_inline_data_with_xattr(inode->i_sb, di));
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006717}
6718
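/*
 * Switch an inline-data inode over to extent-based storage: reserve
 * a cluster if the inode holds data, copy the inline bytes into the
 * page cache and dirty those pages, clear OCFS2_INLINE_DATA_FL,
 * install an empty root extent list and insert the newly allocated
 * extent into it.
 */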
6719int ocfs2_convert_inline_data_to_extents(struct inode *inode,
6720 struct buffer_head *di_bh)
6721{
6722 int ret, i, has_data, num_pages = 0;
6723 handle_t *handle;
6724 u64 uninitialized_var(block);
6725 struct ocfs2_inode_info *oi = OCFS2_I(inode);
6726 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
6727 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006728 struct ocfs2_alloc_context *data_ac = NULL;
6729 struct page **pages = NULL;
6730 loff_t end = osb->s_clustersize;
6731
6732 has_data = i_size_read(inode) ? 1 : 0;
6733
6734 if (has_data) {
6735 pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
6736 sizeof(struct page *), GFP_NOFS);
6737 if (pages == NULL) {
6738 ret = -ENOMEM;
6739 mlog_errno(ret);
6740 goto out;
6741 }
6742
6743 ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
6744 if (ret) {
6745 mlog_errno(ret);
6746 goto out;
6747 }
6748 }
6749
6750 handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS);
6751 if (IS_ERR(handle)) {
6752 ret = PTR_ERR(handle);
6753 mlog_errno(ret);
6754 goto out_unlock;
6755 }
6756
6757 ret = ocfs2_journal_access(handle, inode, di_bh,
6758 OCFS2_JOURNAL_ACCESS_WRITE);
6759 if (ret) {
6760 mlog_errno(ret);
6761 goto out_commit;
6762 }
6763
6764 if (has_data) {
6765 u32 bit_off, num;
6766 unsigned int page_end;
6767 u64 phys;
6768
6769 ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
6770 &num);
6771 if (ret) {
6772 mlog_errno(ret);
6773 goto out_commit;
6774 }
6775
6776 /*
6777 * Save two copies, one for insert, and one that can
6778 * be changed by ocfs2_map_and_dirty_page() below.
6779 */
6780 block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
6781
6782 /*
6783 * Non-sparse file systems zero on extend, so no need
6784 * to do that now.
6785 */
6786 if (!ocfs2_sparse_alloc(osb) &&
6787 PAGE_CACHE_SIZE < osb->s_clustersize)
6788 end = PAGE_CACHE_SIZE;
6789
6790 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
6791 if (ret) {
6792 mlog_errno(ret);
6793 goto out_commit;
6794 }
6795
6796 /*
6797 * This should populate the 1st page for us and mark
6798 * it up to date.
6799 */
6800 ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
6801 if (ret) {
6802 mlog_errno(ret);
6803 goto out_commit;
6804 }
6805
6806 page_end = PAGE_CACHE_SIZE;
6807 if (PAGE_CACHE_SIZE > osb->s_clustersize)
6808 page_end = osb->s_clustersize;
6809
6810 for (i = 0; i < num_pages; i++)
6811 ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
6812 pages[i], i > 0, &phys);
6813 }
6814
6815 spin_lock(&oi->ip_lock);
6816 oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
6817 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
6818 spin_unlock(&oi->ip_lock);
6819
Mark Fasheh5b6a3a22007-09-13 16:33:54 -07006820 ocfs2_dinode_new_extent_list(inode, di);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006821
6822 ocfs2_journal_dirty(handle, di_bh);
6823
6824 if (has_data) {
6825 /*
6826 * An error at this point should be extremely rare. If
6827 * this proves to be false, we could always re-build
6828 * the in-inode data from our pages.
6829 */
Tao Maf56654c2008-08-18 17:38:48 +08006830 ret = ocfs2_dinode_insert_extent(osb, handle, inode, di_bh,
6831 0, block, 1, 0, NULL);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07006832 if (ret) {
6833 mlog_errno(ret);
6834 goto out_commit;
6835 }
6836
6837 inode->i_blocks = ocfs2_inode_sector_count(inode);
6838 }
6839
6840out_commit:
6841 ocfs2_commit_trans(osb, handle);
6842
6843out_unlock:
6844 if (data_ac)
6845 ocfs2_free_alloc_context(data_ac);
6846
6847out:
6848 if (pages) {
6849 ocfs2_unlock_and_free_pages(pages, num_pages);
6850 kfree(pages);
6851 }
6852
6853 return ret;
6854}
6855
Mark Fashehccd979b2005-12-15 14:31:24 -08006856/*
6857 * It is expected that, by the time you call this function,
6858 * inode->i_size and fe->i_size have been adjusted.
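 *
 * Truncation runs in passes: each pass removes all or part of the
 * tail extent record from the rightmost leaf in its own transaction,
 * flushing the truncate log when necessary, and loops until the new
 * i_size is reached.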
6859 *
6860 * WARNING: This will kfree the truncate context
6861 */
6862int ocfs2_commit_truncate(struct ocfs2_super *osb,
6863 struct inode *inode,
6864 struct buffer_head *fe_bh,
6865 struct ocfs2_truncate_context *tc)
6866{
6867 int status, i, credits, tl_sem = 0;
Mark Fashehdcd05382007-01-16 11:32:23 -08006868 u32 clusters_to_del, new_highest_cpos, range;
Mark Fashehccd979b2005-12-15 14:31:24 -08006869 struct ocfs2_extent_list *el;
Mark Fasheh1fabe142006-10-09 18:11:45 -07006870 handle_t *handle = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08006871 struct inode *tl_inode = osb->osb_tl_inode;
Mark Fashehdcd05382007-01-16 11:32:23 -08006872 struct ocfs2_path *path = NULL;
Tao Mae7d4cb62008-08-18 17:38:44 +08006873 struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
Mark Fashehccd979b2005-12-15 14:31:24 -08006874
6875 mlog_entry_void();
6876
Mark Fashehdcd05382007-01-16 11:32:23 -08006877 new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
Mark Fashehccd979b2005-12-15 14:31:24 -08006878 i_size_read(inode));
6879
Tao Mae7d4cb62008-08-18 17:38:44 +08006880 path = ocfs2_new_path(fe_bh, &di->id2.i_list);
Mark Fashehdcd05382007-01-16 11:32:23 -08006881 if (!path) {
6882 status = -ENOMEM;
6883 mlog_errno(status);
6884 goto bail;
6885 }
Mark Fasheh83418972007-04-23 18:53:12 -07006886
6887 ocfs2_extent_map_trunc(inode, new_highest_cpos);
6888
Mark Fashehccd979b2005-12-15 14:31:24 -08006889start:
Mark Fashehdcd05382007-01-16 11:32:23 -08006890 /*
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006891 * Check that we still have allocation to delete.
6892 */
6893 if (OCFS2_I(inode)->ip_clusters == 0) {
6894 status = 0;
6895 goto bail;
6896 }
6897
6898 /*
Mark Fashehdcd05382007-01-16 11:32:23 -08006899 * Truncate always works against the rightmost tree branch.
6900 */
6901 status = ocfs2_find_path(inode, path, UINT_MAX);
6902 if (status) {
6903 mlog_errno(status);
6904 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08006905 }
6906
Mark Fashehdcd05382007-01-16 11:32:23 -08006907 mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n",
6908 OCFS2_I(inode)->ip_clusters, path->p_tree_depth);
6909
6910 /*
6911 * By now, el will point to the extent list on the bottommost
6912 * portion of this tree. Only the tail record is considered in
6913 * each pass.
6914 *
6915 * We handle the following cases, in order:
6916 * - empty extent: delete the remaining branch
6917 * - remove the entire record
6918 * - remove a partial record
6919 * - no record needs to be removed (truncate has completed)
6920 */
6921 el = path_leaf_el(path);
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006922 if (le16_to_cpu(el->l_next_free_rec) == 0) {
6923 ocfs2_error(inode->i_sb,
6924 "Inode %llu has empty extent block at %llu\n",
6925 (unsigned long long)OCFS2_I(inode)->ip_blkno,
6926 (unsigned long long)path_leaf_bh(path)->b_blocknr);
6927 status = -EROFS;
6928 goto bail;
6929 }
6930
Mark Fashehccd979b2005-12-15 14:31:24 -08006931 i = le16_to_cpu(el->l_next_free_rec) - 1;
Mark Fashehdcd05382007-01-16 11:32:23 -08006932 range = le32_to_cpu(el->l_recs[i].e_cpos) +
Mark Fashehe48edee2007-03-07 16:46:57 -08006933 ocfs2_rec_clusters(el, &el->l_recs[i]);
Mark Fashehdcd05382007-01-16 11:32:23 -08006934 if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) {
6935 clusters_to_del = 0;
6936 } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
Mark Fashehe48edee2007-03-07 16:46:57 -08006937 clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
Mark Fashehdcd05382007-01-16 11:32:23 -08006938 } else if (range > new_highest_cpos) {
Mark Fashehe48edee2007-03-07 16:46:57 -08006939 clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
Mark Fashehccd979b2005-12-15 14:31:24 -08006940 le32_to_cpu(el->l_recs[i].e_cpos)) -
Mark Fashehdcd05382007-01-16 11:32:23 -08006941 new_highest_cpos;
6942 } else {
6943 status = 0;
6944 goto bail;
6945 }
Mark Fashehccd979b2005-12-15 14:31:24 -08006946
Mark Fashehdcd05382007-01-16 11:32:23 -08006947 mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
6948 clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);
6949
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08006950 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08006951 tl_sem = 1;
6952 /* ocfs2_truncate_log_needs_flush guarantees us at least one
6953 * record is free for use. If there isn't any, we flush to get
6954 * an empty truncate log. */
6955 if (ocfs2_truncate_log_needs_flush(osb)) {
6956 status = __ocfs2_flush_truncate_log(osb);
6957 if (status < 0) {
6958 mlog_errno(status);
6959 goto bail;
6960 }
6961 }
6962
6963 credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08006964 (struct ocfs2_dinode *)fe_bh->b_data,
6965 el);
Mark Fasheh65eff9c2006-10-09 17:26:22 -07006966 handle = ocfs2_start_trans(osb, credits);
Mark Fashehccd979b2005-12-15 14:31:24 -08006967 if (IS_ERR(handle)) {
6968 status = PTR_ERR(handle);
6969 handle = NULL;
6970 mlog_errno(status);
6971 goto bail;
6972 }
6973
Mark Fashehdcd05382007-01-16 11:32:23 -08006974 status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
6975 tc, path);
Mark Fashehccd979b2005-12-15 14:31:24 -08006976 if (status < 0) {
6977 mlog_errno(status);
6978 goto bail;
6979 }
6980
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08006981 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08006982 tl_sem = 0;
6983
Mark Fasheh02dc1af2006-10-09 16:48:10 -07006984 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08006985 handle = NULL;
6986
Mark Fashehdcd05382007-01-16 11:32:23 -08006987 ocfs2_reinit_path(path, 1);
6988
6989 /*
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006990 * The check above will catch the case where we've truncated
6991 * away all allocation.
Mark Fashehdcd05382007-01-16 11:32:23 -08006992 */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08006993 goto start;
6994
Mark Fashehccd979b2005-12-15 14:31:24 -08006995bail:
Mark Fashehccd979b2005-12-15 14:31:24 -08006996
6997 ocfs2_schedule_truncate_log_flush(osb, 1);
6998
6999 if (tl_sem)
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08007000 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08007001
7002 if (handle)
Mark Fasheh02dc1af2006-10-09 16:48:10 -07007003 ocfs2_commit_trans(osb, handle);
Mark Fashehccd979b2005-12-15 14:31:24 -08007004
Mark Fasheh59a5e412007-06-22 15:52:36 -07007005 ocfs2_run_deallocs(osb, &tc->tc_dealloc);
7006
Mark Fashehdcd05382007-01-16 11:32:23 -08007007 ocfs2_free_path(path);
Mark Fashehccd979b2005-12-15 14:31:24 -08007008
7009 /* This will drop the ext_alloc cluster lock for us */
7010 ocfs2_free_truncate_context(tc);
7011
7012 mlog_exit(status);
7013 return status;
7014}
7015
Mark Fashehccd979b2005-12-15 14:31:24 -08007016/*
Mark Fasheh59a5e412007-06-22 15:52:36 -07007017 * Expects the inode to already be locked.
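 *
 * Allocates and initializes the truncate context; when the extent
 * tree has depth, the block pointed to by i_last_eb_blk is read and
 * kept in the context. ocfs2_commit_truncate() later consumes and
 * frees the context.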
Mark Fashehccd979b2005-12-15 14:31:24 -08007018 */
7019int ocfs2_prepare_truncate(struct ocfs2_super *osb,
7020 struct inode *inode,
7021 struct buffer_head *fe_bh,
7022 struct ocfs2_truncate_context **tc)
7023{
Mark Fasheh59a5e412007-06-22 15:52:36 -07007024 int status;
Mark Fashehccd979b2005-12-15 14:31:24 -08007025 unsigned int new_i_clusters;
7026 struct ocfs2_dinode *fe;
7027 struct ocfs2_extent_block *eb;
Mark Fashehccd979b2005-12-15 14:31:24 -08007028 struct buffer_head *last_eb_bh = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08007029
7030 mlog_entry_void();
7031
7032 *tc = NULL;
7033
7034 new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
7035 i_size_read(inode));
7036 fe = (struct ocfs2_dinode *) fe_bh->b_data;
7037
7038 mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
Mark Fasheh1ca1a112007-04-27 16:01:25 -07007039 "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
7040 (unsigned long long)le64_to_cpu(fe->i_size));
Mark Fashehccd979b2005-12-15 14:31:24 -08007041
Robert P. J. Daycd861282006-12-13 00:34:52 -08007042 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
Mark Fashehccd979b2005-12-15 14:31:24 -08007043 if (!(*tc)) {
7044 status = -ENOMEM;
7045 mlog_errno(status);
7046 goto bail;
7047 }
Mark Fasheh59a5e412007-06-22 15:52:36 -07007048 ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
Mark Fashehccd979b2005-12-15 14:31:24 -08007049
Mark Fashehccd979b2005-12-15 14:31:24 -08007050 if (fe->id2.i_list.l_tree_depth) {
Mark Fashehccd979b2005-12-15 14:31:24 -08007051 status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
7052 &last_eb_bh, OCFS2_BH_CACHED, inode);
7053 if (status < 0) {
7054 mlog_errno(status);
7055 goto bail;
7056 }
7057 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
7058 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
7059 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
7060
7061 brelse(last_eb_bh);
7062 status = -EIO;
7063 goto bail;
7064 }
Mark Fashehccd979b2005-12-15 14:31:24 -08007065 }
7066
7067 (*tc)->tc_last_eb_bh = last_eb_bh;
7068
Mark Fashehccd979b2005-12-15 14:31:24 -08007069 status = 0;
7070bail:
7071 if (status < 0) {
7072 if (*tc)
7073 ocfs2_free_truncate_context(*tc);
7074 *tc = NULL;
7075 }
7076 mlog_exit_void();
7077 return status;
7078}
7079
Mark Fasheh1afc32b2007-09-07 14:46:51 -07007080/*
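 * Zero the byte range [start, end) of an inline-data inode's id_data
 * area and, when 'trunc' is set, shrink i_size to 'start'.
 *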
7081 * 'start' is inclusive, 'end' is not.
7082 */
7083int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
7084 unsigned int start, unsigned int end, int trunc)
7085{
7086 int ret;
7087 unsigned int numbytes;
7088 handle_t *handle;
7089 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
7090 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
7091 struct ocfs2_inline_data *idata = &di->id2.i_data;
7092
7093 if (end > i_size_read(inode))
7094 end = i_size_read(inode);
7095
7096 BUG_ON(start >= end);
7097
7098 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
7099 !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
7100 !ocfs2_supports_inline_data(osb)) {
7101 ocfs2_error(inode->i_sb,
7102 "Inline data flags for inode %llu don't agree! "
7103 "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
7104 (unsigned long long)OCFS2_I(inode)->ip_blkno,
7105 le16_to_cpu(di->i_dyn_features),
7106 OCFS2_I(inode)->ip_dyn_features,
7107 osb->s_feature_incompat);
7108 ret = -EROFS;
7109 goto out;
7110 }
7111
7112 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
7113 if (IS_ERR(handle)) {
7114 ret = PTR_ERR(handle);
7115 mlog_errno(ret);
7116 goto out;
7117 }
7118
7119 ret = ocfs2_journal_access(handle, inode, di_bh,
7120 OCFS2_JOURNAL_ACCESS_WRITE);
7121 if (ret) {
7122 mlog_errno(ret);
7123 goto out_commit;
7124 }
7125
7126 numbytes = end - start;
7127 memset(idata->id_data + start, 0, numbytes);
7128
7129 /*
7130 * No need to worry about the data page here - it's been
7131 * truncated already and inline data doesn't need it for
7132 * pushing zeros to disk, so we'll let readpage pick it up
7133 * later.
7134 */
7135 if (trunc) {
7136 i_size_write(inode, start);
7137 di->i_size = cpu_to_le64(start);
7138 }
7139
7140 inode->i_blocks = ocfs2_inode_sector_count(inode);
7141 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
7142
7143 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
7144 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
7145
7146 ocfs2_journal_dirty(handle, di_bh);
7147
7148out_commit:
7149 ocfs2_commit_trans(osb, handle);
7150
7151out:
7152 return ret;
7153}
7154
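/*
 * Tear down a truncate context: log a notice if its dealloc context
 * was never run, release the cached last extent block, and free the
 * structure itself.
 */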
Mark Fashehccd979b2005-12-15 14:31:24 -08007155static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
7156{
Mark Fasheh59a5e412007-06-22 15:52:36 -07007157 /*
7158 * The caller is responsible for completing deallocation
7159 * before freeing the context.
7160 */
7161 if (tc->tc_dealloc.c_first_suballocator != NULL)
7162 mlog(ML_NOTICE,
7163 "Truncate completion has non-empty dealloc context\n");
Mark Fashehccd979b2005-12-15 14:31:24 -08007164
7165 if (tc->tc_last_eb_bh)
7166 brelse(tc->tc_last_eb_bh);
7167
7168 kfree(tc);
7169}