blob: 26e867087e956ffa7603c287f911b54b6e1481d8 [file] [log] [blame]
Mark Fashehccd979b2005-12-15 14:31:24 -08001/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * alloc.c
5 *
6 * Extent allocs and frees
7 *
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
24 */
25
26#include <linux/fs.h>
27#include <linux/types.h>
28#include <linux/slab.h>
29#include <linux/highmem.h>
Mark Fasheh60b11392007-02-16 11:46:50 -080030#include <linux/swap.h>
Mark Fashehccd979b2005-12-15 14:31:24 -080031
32#define MLOG_MASK_PREFIX ML_DISK_ALLOC
33#include <cluster/masklog.h>
34
35#include "ocfs2.h"
36
37#include "alloc.h"
Mark Fasheh60b11392007-02-16 11:46:50 -080038#include "aops.h"
Mark Fashehccd979b2005-12-15 14:31:24 -080039#include "dlmglue.h"
40#include "extent_map.h"
41#include "inode.h"
42#include "journal.h"
43#include "localalloc.h"
44#include "suballoc.h"
45#include "sysfile.h"
46#include "file.h"
47#include "super.h"
48#include "uptodate.h"
49
50#include "buffer_head_io.h"
51
Mark Fashehccd979b2005-12-15 14:31:24 -080052static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
Mark Fasheh59a5e412007-06-22 15:52:36 -070053static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
54 struct ocfs2_extent_block *eb);
Mark Fashehccd979b2005-12-15 14:31:24 -080055
Mark Fashehdcd05382007-01-16 11:32:23 -080056/*
57 * Structures which describe a path through a btree, and functions to
58 * manipulate them.
59 *
60 * The idea here is to be as generic as possible with the tree
61 * manipulation code.
62 */
/*
 * One level of a btree path: the buffer holding the on-disk block and
 * a pointer to the extent list embedded in that block's data.
 */
struct ocfs2_path_item {
	struct buffer_head		*bh;
	struct ocfs2_extent_list	*el;
};

/* Deepest tree this path structure can describe (root plus 4 levels). */
#define OCFS2_MAX_PATH_DEPTH	5

struct ocfs2_path {
	int			p_tree_depth;	/* depth of the tree described */
	struct ocfs2_path_item	p_node[OCFS2_MAX_PATH_DEPTH];
};

/*
 * p_node[0] is always the root of the tree (the dinode's list), and
 * p_node[p_tree_depth] is always the leaf.
 */
#define path_root_bh(_path) ((_path)->p_node[0].bh)
#define path_root_el(_path) ((_path)->p_node[0].el)
#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
#define path_num_items(_path) ((_path)->p_tree_depth + 1)
81/*
82 * Reset the actual path elements so that we can re-use the structure
83 * to build another path. Generally, this involves freeing the buffer
84 * heads.
85 */
86static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
87{
88 int i, start = 0, depth = 0;
89 struct ocfs2_path_item *node;
90
91 if (keep_root)
92 start = 1;
93
94 for(i = start; i < path_num_items(path); i++) {
95 node = &path->p_node[i];
96
97 brelse(node->bh);
98 node->bh = NULL;
99 node->el = NULL;
100 }
101
102 /*
103 * Tree depth may change during truncate, or insert. If we're
104 * keeping the root extent list, then make sure that our path
105 * structure reflects the proper depth.
106 */
107 if (keep_root)
108 depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
109
110 path->p_tree_depth = depth;
111}
112
/* Drop every buffer reference held by the path (root included) and free it. */
static void ocfs2_free_path(struct ocfs2_path *path)
{
	if (!path)
		return;

	ocfs2_reinit_path(path, 0);
	kfree(path);
}
120
121/*
122 * Make the *dest path the same as src and re-initialize src path to
123 * have a root only.
124 */
125static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
126{
127 int i;
128
129 BUG_ON(path_root_bh(dest) != path_root_bh(src));
130
131 for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
132 brelse(dest->p_node[i].bh);
133
134 dest->p_node[i].bh = src->p_node[i].bh;
135 dest->p_node[i].el = src->p_node[i].el;
136
137 src->p_node[i].bh = NULL;
138 src->p_node[i].el = NULL;
139 }
140}
141
142/*
143 * Insert an extent block at given index.
144 *
145 * This will not take an additional reference on eb_bh.
146 */
147static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
148 struct buffer_head *eb_bh)
149{
150 struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;
151
152 /*
153 * Right now, no root bh is an extent block, so this helps
154 * catch code errors with dinode trees. The assertion can be
155 * safely removed if we ever need to insert extent block
156 * structures at the root.
157 */
158 BUG_ON(index == 0);
159
160 path->p_node[index].bh = eb_bh;
161 path->p_node[index].el = &eb->h_list;
162}
163
164static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
165 struct ocfs2_extent_list *root_el)
166{
167 struct ocfs2_path *path;
168
169 BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);
170
171 path = kzalloc(sizeof(*path), GFP_NOFS);
172 if (path) {
173 path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
174 get_bh(root_bh);
175 path_root_bh(path) = root_bh;
176 path_root_el(path) = root_el;
177 }
178
179 return path;
180}
181
182/*
183 * Allocate and initialize a new path based on a disk inode tree.
184 */
185static struct ocfs2_path *ocfs2_new_inode_path(struct buffer_head *di_bh)
186{
187 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
188 struct ocfs2_extent_list *el = &di->id2.i_list;
189
190 return ocfs2_new_path(di_bh, el);
191}
192
193/*
194 * Convenience function to journal all components in a path.
195 */
196static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
197 struct ocfs2_path *path)
198{
199 int i, ret = 0;
200
201 if (!path)
202 goto out;
203
204 for(i = 0; i < path_num_items(path); i++) {
205 ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh,
206 OCFS2_JOURNAL_ACCESS_WRITE);
207 if (ret < 0) {
208 mlog_errno(ret);
209 goto out;
210 }
211 }
212
213out:
214 return ret;
215}
216
/*
 * How a candidate extent record relates to an existing one: mergeable
 * on the left, mergeable on the right, or not contiguous at all.
 */
enum ocfs2_contig_type {
	CONTIG_NONE = 0,
	CONTIG_LEFT,
	CONTIG_RIGHT
};
222
Mark Fashehe48edee2007-03-07 16:46:57 -0800223
224/*
225 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
226 * ocfs2_extent_contig only work properly against leaf nodes!
227 */
Mark Fashehdcd05382007-01-16 11:32:23 -0800228static int ocfs2_block_extent_contig(struct super_block *sb,
229 struct ocfs2_extent_rec *ext,
230 u64 blkno)
Mark Fashehccd979b2005-12-15 14:31:24 -0800231{
Mark Fashehe48edee2007-03-07 16:46:57 -0800232 u64 blk_end = le64_to_cpu(ext->e_blkno);
233
234 blk_end += ocfs2_clusters_to_blocks(sb,
235 le16_to_cpu(ext->e_leaf_clusters));
236
237 return blkno == blk_end;
Mark Fashehccd979b2005-12-15 14:31:24 -0800238}
239
Mark Fashehdcd05382007-01-16 11:32:23 -0800240static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
241 struct ocfs2_extent_rec *right)
242{
Mark Fashehe48edee2007-03-07 16:46:57 -0800243 u32 left_range;
244
245 left_range = le32_to_cpu(left->e_cpos) +
246 le16_to_cpu(left->e_leaf_clusters);
247
248 return (left_range == le32_to_cpu(right->e_cpos));
Mark Fashehdcd05382007-01-16 11:32:23 -0800249}
250
251static enum ocfs2_contig_type
252 ocfs2_extent_contig(struct inode *inode,
253 struct ocfs2_extent_rec *ext,
254 struct ocfs2_extent_rec *insert_rec)
255{
256 u64 blkno = le64_to_cpu(insert_rec->e_blkno);
257
258 if (ocfs2_extents_adjacent(ext, insert_rec) &&
259 ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
260 return CONTIG_RIGHT;
261
262 blkno = le64_to_cpu(ext->e_blkno);
263 if (ocfs2_extents_adjacent(insert_rec, ext) &&
264 ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
265 return CONTIG_LEFT;
266
267 return CONTIG_NONE;
268}
269
/*
 * NOTE: We can have pretty much any combination of contiguousness and
 * appending.
 *
 * The usefulness of APPEND_TAIL is more in that it lets us know that
 * we'll have to update the path to that leaf.
 */
enum ocfs2_append_type {
	APPEND_NONE = 0,
	APPEND_TAIL,
};

/*
 * Precomputed description of how and where a new extent record will
 * be inserted. NOTE(review): field semantics inferred from names only
 * within this chunk — confirm against the insert code that fills this in.
 */
struct ocfs2_insert_type {
	enum ocfs2_append_type	ins_appending;
	enum ocfs2_contig_type	ins_contig;
	int			ins_contig_index;
	int			ins_free_records;
	int			ins_tree_depth;
};
289
/*
 * How many free extents have we got before we need more meta data?
 *
 * Returns the number of unused record slots in the rightmost leaf of
 * the inode's tree, or a negative error code (-EIO on a corrupt
 * dinode or a failed extent block read).
 */
int ocfs2_num_free_extents(struct ocfs2_super *osb,
			   struct inode *inode,
			   struct ocfs2_dinode *fe)
{
	int retval;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_block *eb;
	struct buffer_head *eb_bh = NULL;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
		retval = -EIO;
		goto bail;
	}

	/*
	 * New records always go in the rightmost leaf; when the tree
	 * has depth, i_last_eb_blk points at that leaf, otherwise the
	 * dinode's embedded list is the leaf.
	 */
	if (fe->i_last_eb_blk) {
		retval = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
					  &eb_bh, OCFS2_BH_CACHED, inode);
		if (retval < 0) {
			mlog_errno(retval);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	} else
		el = &fe->id2.i_list;

	/* Whatever list we picked must actually be a leaf. */
	BUG_ON(el->l_tree_depth != 0);

	retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
	if (eb_bh)
		brelse(eb_bh);

	mlog_exit(retval);
	return retval;
}
332
/* expects array to already be allocated
 *
 * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
 * l_count for you
 *
 * Claims 'wanted' metadata blocks from meta_ac, attaches a journaled
 * buffer for each to bhs[], and initializes the minimal extent block
 * header fields. On error all buffers acquired so far are released
 * and bhs[] entries are reset to NULL.
 */
static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
				     handle_t *handle,
				     struct inode *inode,
				     int wanted,
				     struct ocfs2_alloc_context *meta_ac,
				     struct buffer_head *bhs[])
{
	int count, status, i;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 first_blkno;
	struct ocfs2_extent_block *eb;

	mlog_entry_void();

	count = 0;
	while (count < wanted) {
		/*
		 * The suballocator may hand back fewer contiguous
		 * blocks than asked for, so keep claiming until we
		 * have them all.
		 */
		status = ocfs2_claim_metadata(osb,
					      handle,
					      meta_ac,
					      wanted - count,
					      &suballoc_bit_start,
					      &num_got,
					      &first_blkno);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		for(i = count; i < (num_got + count); i++) {
			bhs[i] = sb_getblk(osb->sb, first_blkno);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
			/* Brand new block: mark it uptodate without a read. */
			ocfs2_set_new_buffer_uptodate(inode, bhs[i]);

			status = ocfs2_journal_access(handle, inode, bhs[i],
						      OCFS2_JOURNAL_ACCESS_CREATE);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}

			memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
			eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
			/* Ok, setup the minimal stuff here. */
			strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
			eb->h_blkno = cpu_to_le64(first_blkno);
			eb->h_fs_generation = cpu_to_le32(osb->fs_generation);

#ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
			/* we always use slot zero's suballocator */
			eb->h_suballoc_slot = 0;
#else
			eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
#endif
			eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
			eb->h_list.l_count =
				cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));

			/* The claimed run is contiguous: advance both
			 * the bit and the block number in lockstep. */
			suballoc_bit_start++;
			first_blkno++;

			/* We'll also be dirtied by the caller, so
			 * this isn't absolutely necessary. */
			status = ocfs2_journal_dirty(handle, bhs[i]);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}

		count += num_got;
	}

	status = 0;
bail:
	/* Unwind: drop every buffer we managed to acquire. */
	if (status < 0) {
		for(i = 0; i < wanted; i++) {
			if (bhs[i])
				brelse(bhs[i]);
			bhs[i] = NULL;
		}
	}
	mlog_exit(status);
	return status;
}
427
428/*
Mark Fashehdcd05382007-01-16 11:32:23 -0800429 * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
430 *
431 * Returns the sum of the rightmost extent rec logical offset and
432 * cluster count.
433 *
434 * ocfs2_add_branch() uses this to determine what logical cluster
435 * value should be populated into the leftmost new branch records.
436 *
437 * ocfs2_shift_tree_depth() uses this to determine the # clusters
438 * value for the new topmost tree record.
439 */
440static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
441{
442 int i;
443
444 i = le16_to_cpu(el->l_next_free_rec) - 1;
445
446 return le32_to_cpu(el->l_recs[i].e_cpos) +
Mark Fashehe48edee2007-03-07 16:46:57 -0800447 ocfs2_rec_clusters(el, &el->l_recs[i]);
Mark Fashehdcd05382007-01-16 11:32:23 -0800448}
449
450/*
Mark Fashehccd979b2005-12-15 14:31:24 -0800451 * Add an entire tree branch to our inode. eb_bh is the extent block
452 * to start at, if we don't want to start the branch at the dinode
453 * structure.
454 *
455 * last_eb_bh is required as we have to update it's next_leaf pointer
456 * for the new last extent block.
457 *
458 * the new branch will be 'empty' in the sense that every block will
Mark Fashehe48edee2007-03-07 16:46:57 -0800459 * contain a single record with cluster count == 0.
Mark Fashehccd979b2005-12-15 14:31:24 -0800460 */
461static int ocfs2_add_branch(struct ocfs2_super *osb,
Mark Fasheh1fabe142006-10-09 18:11:45 -0700462 handle_t *handle,
Mark Fashehccd979b2005-12-15 14:31:24 -0800463 struct inode *inode,
464 struct buffer_head *fe_bh,
465 struct buffer_head *eb_bh,
466 struct buffer_head *last_eb_bh,
467 struct ocfs2_alloc_context *meta_ac)
468{
469 int status, new_blocks, i;
470 u64 next_blkno, new_last_eb_blk;
471 struct buffer_head *bh;
472 struct buffer_head **new_eb_bhs = NULL;
473 struct ocfs2_dinode *fe;
474 struct ocfs2_extent_block *eb;
475 struct ocfs2_extent_list *eb_el;
476 struct ocfs2_extent_list *el;
Mark Fashehdcd05382007-01-16 11:32:23 -0800477 u32 new_cpos;
Mark Fashehccd979b2005-12-15 14:31:24 -0800478
479 mlog_entry_void();
480
481 BUG_ON(!last_eb_bh);
482
483 fe = (struct ocfs2_dinode *) fe_bh->b_data;
484
485 if (eb_bh) {
486 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
487 el = &eb->h_list;
488 } else
489 el = &fe->id2.i_list;
490
491 /* we never add a branch to a leaf. */
492 BUG_ON(!el->l_tree_depth);
493
494 new_blocks = le16_to_cpu(el->l_tree_depth);
495
496 /* allocate the number of new eb blocks we need */
497 new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
498 GFP_KERNEL);
499 if (!new_eb_bhs) {
500 status = -ENOMEM;
501 mlog_errno(status);
502 goto bail;
503 }
504
505 status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
506 meta_ac, new_eb_bhs);
507 if (status < 0) {
508 mlog_errno(status);
509 goto bail;
510 }
511
Mark Fashehdcd05382007-01-16 11:32:23 -0800512 eb = (struct ocfs2_extent_block *)last_eb_bh->b_data;
513 new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
514
Mark Fashehccd979b2005-12-15 14:31:24 -0800515 /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
516 * linked with the rest of the tree.
517 * conversly, new_eb_bhs[0] is the new bottommost leaf.
518 *
519 * when we leave the loop, new_last_eb_blk will point to the
520 * newest leaf, and next_blkno will point to the topmost extent
521 * block. */
522 next_blkno = new_last_eb_blk = 0;
523 for(i = 0; i < new_blocks; i++) {
524 bh = new_eb_bhs[i];
525 eb = (struct ocfs2_extent_block *) bh->b_data;
526 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
527 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
528 status = -EIO;
529 goto bail;
530 }
531 eb_el = &eb->h_list;
532
533 status = ocfs2_journal_access(handle, inode, bh,
534 OCFS2_JOURNAL_ACCESS_CREATE);
535 if (status < 0) {
536 mlog_errno(status);
537 goto bail;
538 }
539
540 eb->h_next_leaf_blk = 0;
541 eb_el->l_tree_depth = cpu_to_le16(i);
542 eb_el->l_next_free_rec = cpu_to_le16(1);
Mark Fashehdcd05382007-01-16 11:32:23 -0800543 /*
544 * This actually counts as an empty extent as
545 * c_clusters == 0
546 */
547 eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
Mark Fashehccd979b2005-12-15 14:31:24 -0800548 eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
Mark Fashehe48edee2007-03-07 16:46:57 -0800549 /*
550 * eb_el isn't always an interior node, but even leaf
551 * nodes want a zero'd flags and reserved field so
552 * this gets the whole 32 bits regardless of use.
553 */
554 eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
Mark Fashehccd979b2005-12-15 14:31:24 -0800555 if (!eb_el->l_tree_depth)
556 new_last_eb_blk = le64_to_cpu(eb->h_blkno);
557
558 status = ocfs2_journal_dirty(handle, bh);
559 if (status < 0) {
560 mlog_errno(status);
561 goto bail;
562 }
563
564 next_blkno = le64_to_cpu(eb->h_blkno);
565 }
566
567 /* This is a bit hairy. We want to update up to three blocks
568 * here without leaving any of them in an inconsistent state
569 * in case of error. We don't have to worry about
570 * journal_dirty erroring as it won't unless we've aborted the
571 * handle (in which case we would never be here) so reserving
572 * the write with journal_access is all we need to do. */
573 status = ocfs2_journal_access(handle, inode, last_eb_bh,
574 OCFS2_JOURNAL_ACCESS_WRITE);
575 if (status < 0) {
576 mlog_errno(status);
577 goto bail;
578 }
579 status = ocfs2_journal_access(handle, inode, fe_bh,
580 OCFS2_JOURNAL_ACCESS_WRITE);
581 if (status < 0) {
582 mlog_errno(status);
583 goto bail;
584 }
585 if (eb_bh) {
586 status = ocfs2_journal_access(handle, inode, eb_bh,
587 OCFS2_JOURNAL_ACCESS_WRITE);
588 if (status < 0) {
589 mlog_errno(status);
590 goto bail;
591 }
592 }
593
594 /* Link the new branch into the rest of the tree (el will
595 * either be on the fe, or the extent block passed in. */
596 i = le16_to_cpu(el->l_next_free_rec);
597 el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
Mark Fashehdcd05382007-01-16 11:32:23 -0800598 el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
Mark Fashehe48edee2007-03-07 16:46:57 -0800599 el->l_recs[i].e_int_clusters = 0;
Mark Fashehccd979b2005-12-15 14:31:24 -0800600 le16_add_cpu(&el->l_next_free_rec, 1);
601
602 /* fe needs a new last extent block pointer, as does the
603 * next_leaf on the previously last-extent-block. */
604 fe->i_last_eb_blk = cpu_to_le64(new_last_eb_blk);
605
606 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
607 eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
608
609 status = ocfs2_journal_dirty(handle, last_eb_bh);
610 if (status < 0)
611 mlog_errno(status);
612 status = ocfs2_journal_dirty(handle, fe_bh);
613 if (status < 0)
614 mlog_errno(status);
615 if (eb_bh) {
616 status = ocfs2_journal_dirty(handle, eb_bh);
617 if (status < 0)
618 mlog_errno(status);
619 }
620
621 status = 0;
622bail:
623 if (new_eb_bhs) {
624 for (i = 0; i < new_blocks; i++)
625 if (new_eb_bhs[i])
626 brelse(new_eb_bhs[i]);
627 kfree(new_eb_bhs);
628 }
629
630 mlog_exit(status);
631 return status;
632}
633
/*
 * adds another level to the allocation tree.
 * returns back the new extent block so you can add a branch to it
 * after this call.
 *
 * The old root contents are copied wholesale into a freshly claimed
 * extent block, and the root is rewritten as a single record pointing
 * at that block. On success *ret_new_eb_bh holds a reference the
 * caller must release.
 */
static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
				  handle_t *handle,
				  struct inode *inode,
				  struct buffer_head *fe_bh,
				  struct ocfs2_alloc_context *meta_ac,
				  struct buffer_head **ret_new_eb_bh)
{
	int status, i;
	u32 new_clusters;
	struct buffer_head *new_eb_bh = NULL;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *fe_el;
	struct ocfs2_extent_list  *eb_el;

	mlog_entry_void();

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
					   &new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
		status = -EIO;
		goto bail;
	}

	eb_el = &eb->h_list;
	fe = (struct ocfs2_dinode *) fe_bh->b_data;
	fe_el = &fe->id2.i_list;

	status = ocfs2_journal_access(handle, inode, new_eb_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* copy the fe data into the new extent block */
	eb_el->l_tree_depth = fe_el->l_tree_depth;
	eb_el->l_next_free_rec = fe_el->l_next_free_rec;
	for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
		eb_el->l_recs[i] = fe_el->l_recs[i];

	status = ocfs2_journal_dirty(handle, new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Cluster count for the single record the root will now hold:
	 * the logical end of the copied (old root) list. */
	new_clusters = ocfs2_sum_rightmost_rec(eb_el);

	/* update fe now */
	le16_add_cpu(&fe_el->l_tree_depth, 1);
	fe_el->l_recs[0].e_cpos = 0;
	fe_el->l_recs[0].e_blkno = eb->h_blkno;
	fe_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
	/* Zero out the now-stale records beyond the first. */
	for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
		memset(&fe_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	fe_el->l_next_free_rec = cpu_to_le16(1);

	/* If this is our 1st tree depth shift, then last_eb_blk
	 * becomes the allocated extent block */
	if (fe_el->l_tree_depth == cpu_to_le16(1))
		fe->i_last_eb_blk = eb->h_blkno;

	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Hand the buffer reference to the caller. */
	*ret_new_eb_bh = new_eb_bh;
	new_eb_bh = NULL;
	status = 0;
bail:
	if (new_eb_bh)
		brelse(new_eb_bh);

	mlog_exit(status);
	return status;
}
732
/*
 * Should only be called when there is no space left in any of the
 * leaf nodes. What we want to do is find the lowest tree depth
 * non-leaf extent block with room for new records. There are three
 * valid results of this search:
 *
 * 1) a lowest extent block is found, then we pass it back in
 *    *lowest_eb_bh and return '0'
 *
 * 2) the search fails to find anything, but the dinode has room. We
 *    pass NULL back in *lowest_eb_bh, but still return '0'
 *
 * 3) the search fails to find anything AND the dinode is full, in
 *    which case we return > 0
 *
 * return status < 0 indicates an error.
 */
static int ocfs2_find_branch_target(struct ocfs2_super *osb,
				    struct inode *inode,
				    struct buffer_head *fe_bh,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	mlog_entry_void();

	*target_bh = NULL;

	fe = (struct ocfs2_dinode *) fe_bh->b_data;
	el = &fe->id2.i_list;

	/* Walk down the rightmost edge, stopping above the leaves
	 * (depth 1 lists point at leaves; we never descend into them). */
	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb, "Dinode %llu has empty "
				    "extent list (next_free_rec == 0)",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno);
			status = -EIO;
			goto bail;
		}
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(inode->i_sb, "Dinode %llu has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno, i);
			status = -EIO;
			goto bail;
		}

		/* Release the previous level's buffer before reading
		 * the next one. */
		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		status = ocfs2_read_block(osb, blkno, &bh, OCFS2_BH_CACHED,
					  inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
		el = &eb->h_list;

		/* Deeper blocks with room replace shallower candidates,
		 * so we end with the lowest one that has a free slot. */
		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			if (lowest_bh)
				brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If we didn't find one and the fe doesn't have any room,
	 * then return '1' */
	if (!lowest_bh
	    && (fe->id2.i_list.l_next_free_rec == fe->id2.i_list.l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}
832
Mark Fashehe48edee2007-03-07 16:46:57 -0800833/*
834 * This is only valid for leaf nodes, which are the only ones that can
835 * have empty extents anyway.
836 */
Mark Fashehdcd05382007-01-16 11:32:23 -0800837static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
838{
Mark Fashehe48edee2007-03-07 16:46:57 -0800839 return !rec->e_leaf_clusters;
Mark Fashehdcd05382007-01-16 11:32:23 -0800840}
841
842/*
843 * This function will discard the rightmost extent record.
844 */
845static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
846{
847 int next_free = le16_to_cpu(el->l_next_free_rec);
848 int count = le16_to_cpu(el->l_count);
849 unsigned int num_bytes;
850
851 BUG_ON(!next_free);
852 /* This will cause us to go off the end of our extent list. */
853 BUG_ON(next_free >= count);
854
855 num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;
856
857 memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
858}
859
/*
 * Insert insert_rec into the sorted leaf 'el', consuming the empty
 * extent at index 0 if one exists. The caller must have ensured the
 * leaf has room (a free slot, or an empty extent to absorb).
 */
static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
			      struct ocfs2_extent_rec *insert_rec)
{
	int i, insert_index, next_free, has_empty, num_bytes;
	u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	next_free = le16_to_cpu(el->l_next_free_rec);
	has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);

	BUG_ON(!next_free);

	/* The tree code before us didn't allow enough room in the leaf. */
	if (el->l_next_free_rec == el->l_count && !has_empty)
		BUG();

	/*
	 * The easiest way to approach this is to just remove the
	 * empty extent and temporarily decrement next_free.
	 */
	if (has_empty) {
		/*
		 * If next_free was 1 (only an empty extent), this
		 * loop won't execute, which is fine. We still want
		 * the decrement below to happen.
		 */
		for(i = 0; i < (next_free - 1); i++)
			el->l_recs[i] = el->l_recs[i+1];

		next_free--;
	}

	/*
	 * Figure out what the new record index should be.
	 */
	for(i = 0; i < next_free; i++) {
		rec = &el->l_recs[i];

		if (insert_cpos < le32_to_cpu(rec->e_cpos))
			break;
	}
	insert_index = i;

	mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
	     insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));

	BUG_ON(insert_index < 0);
	BUG_ON(insert_index >= le16_to_cpu(el->l_count));
	BUG_ON(insert_index > next_free);

	/*
	 * No need to memmove if we're just adding to the tail.
	 */
	if (insert_index != next_free) {
		BUG_ON(next_free >= le16_to_cpu(el->l_count));

		num_bytes = next_free - insert_index;
		num_bytes *= sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[insert_index + 1],
			&el->l_recs[insert_index],
			num_bytes);
	}

	/*
	 * Either we had an empty extent, and need to re-increment or
	 * there was no empty extent on a non full rightmost leaf node,
	 * in which case we still need to increment.
	 */
	next_free++;
	el->l_next_free_rec = cpu_to_le16(next_free);
	/*
	 * Make sure none of the math above just messed up our tree.
	 */
	BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));

	el->l_recs[insert_index] = *insert_rec;

}
938
/*
 * Create an empty extent record .
 *
 * l_next_free_rec may be updated.
 *
 * If an empty extent already exists do nothing.
 */
static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);

	/* Only leaf lists may contain empty extents. */
	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	/* Empty list: just claim slot 0 below. */
	if (next_free == 0)
		goto set_and_inc;

	if (ocfs2_is_empty_extent(&el->l_recs[0]))
		return;

	mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
			"Asked to create an empty extent in a full list:\n"
			"count = %u, tree depth = %u",
			le16_to_cpu(el->l_count),
			le16_to_cpu(el->l_tree_depth));

	/* Open up slot 0 by shifting the live records right. */
	ocfs2_shift_records_right(el);

set_and_inc:
	le16_add_cpu(&el->l_next_free_rec, 1);
	memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}
970
/*
 * For a rotation which involves two leaf nodes, the "root node" is
 * the lowest level tree node which contains a path to both leafs. This
 * resulting set of information can be used to form a complete "subtree"
 *
 * This function is passed two full paths from the dinode down to a
 * pair of adjacent leaves. It's task is to figure out which path
 * index contains the subtree root - this can be the root index itself
 * in a worst-case rotation.
 *
 * The array index of the subtree root is passed back.
 */
static int ocfs2_find_subtree_root(struct inode *inode,
				   struct ocfs2_path *left,
				   struct ocfs2_path *right)
{
	int i = 0;

	/*
	 * Check that the caller passed in two paths from the same tree.
	 */
	BUG_ON(path_root_bh(left) != path_root_bh(right));

	/* Descend while both paths go through the same block; the
	 * level just above the first divergence is the subtree root. */
	do {
		i++;

		/*
		 * The caller didn't pass two adjacent paths.
		 */
		mlog_bug_on_msg(i > left->p_tree_depth,
				"Inode %lu, left depth %u, right depth %u\n"
				"left leaf blk %llu, right leaf blk %llu\n",
				inode->i_ino, left->p_tree_depth,
				right->p_tree_depth,
				(unsigned long long)path_leaf_bh(left)->b_blocknr,
				(unsigned long long)path_leaf_bh(right)->b_blocknr);
	} while (left->p_node[i].bh->b_blocknr ==
		 right->p_node[i].bh->b_blocknr);

	return i - 1;
}
1012
1013typedef void (path_insert_t)(void *, struct buffer_head *);
1014
1015/*
1016 * Traverse a btree path in search of cpos, starting at root_el.
1017 *
1018 * This code can be called with a cpos larger than the tree, in which
1019 * case it will return the rightmost path.
1020 */
static int __ocfs2_find_path(struct inode *inode,
			     struct ocfs2_extent_list *root_el, u32 cpos,
			     path_insert_t *func, void *data)
{
	int i, ret = 0;
	u32 range;
	u64 blkno;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	el = root_el;
	/* Walk interior lists until we reach a leaf (depth 0). */
	while (el->l_tree_depth) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has empty extent list at "
				    "depth %u\n",
				    (unsigned long long)oi->ip_blkno,
				    le16_to_cpu(el->l_tree_depth));
			ret = -EROFS;
			goto out;

		}

		for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
			rec = &el->l_recs[i];

			/*
			 * In the case that cpos is off the allocation
			 * tree, this should just wind up returning the
			 * rightmost record.
			 */
			range = le32_to_cpu(rec->e_cpos) +
				ocfs2_rec_clusters(el, rec);
			if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
			    break;
		}

		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (blkno == 0) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has bad blkno in extent list "
				    "at depth %u (index %d)\n",
				    (unsigned long long)oi->ip_blkno,
				    le16_to_cpu(el->l_tree_depth), i);
			ret = -EROFS;
			goto out;
		}

		brelse(bh);
		bh = NULL;
		ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno,
				       &bh, OCFS2_BH_CACHED, inode);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			ret = -EIO;
			goto out;
		}

		/* Sanity-check the on-disk record count before anyone
		 * indexes into this list. */
		if (le16_to_cpu(el->l_next_free_rec) >
		    le16_to_cpu(el->l_count)) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has bad count in extent list "
				    "at block %llu (next free=%u, count=%u)\n",
				    (unsigned long long)oi->ip_blkno,
				    (unsigned long long)bh->b_blocknr,
				    le16_to_cpu(el->l_next_free_rec),
				    le16_to_cpu(el->l_count));
			ret = -EROFS;
			goto out;
		}

		/* Let the caller record this level (e.g. build a path).
		 * The callback takes its own reference if it keeps bh. */
		if (func)
			func(data, bh);
	}

out:
	/*
	 * Catch any trailing bh that the loop didn't handle.
	 */
	brelse(bh);

	return ret;
}
1114
1115/*
1116 * Given an initialized path (that is, it has a valid root extent
1117 * list), this function will traverse the btree in search of the path
1118 * which would contain cpos.
1119 *
1120 * The path traveled is recorded in the path structure.
1121 *
1122 * Note that this will not do any comparisons on leaf node extent
1123 * records, so it will work fine in the case that we just added a tree
1124 * branch.
1125 */
struct find_path_data {
	int index;		/* next path slot to fill (root is slot 0) */
	struct ocfs2_path *path;	/* path being populated during descent */
};
1130static void find_path_ins(void *data, struct buffer_head *bh)
1131{
1132 struct find_path_data *fp = data;
1133
1134 get_bh(bh);
1135 ocfs2_path_insert_eb(fp->path, fp->index, bh);
1136 fp->index++;
1137}
1138static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
1139 u32 cpos)
1140{
1141 struct find_path_data data;
1142
1143 data.index = 1;
1144 data.path = path;
1145 return __ocfs2_find_path(inode, path_root_el(path), cpos,
1146 find_path_ins, &data);
1147}
1148
1149static void find_leaf_ins(void *data, struct buffer_head *bh)
1150{
1151 struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data;
1152 struct ocfs2_extent_list *el = &eb->h_list;
1153 struct buffer_head **ret = data;
1154
1155 /* We want to retain only the leaf block. */
1156 if (le16_to_cpu(el->l_tree_depth) == 0) {
1157 get_bh(bh);
1158 *ret = bh;
1159 }
1160}
1161/*
1162 * Find the leaf block in the tree which would contain cpos. No
1163 * checking of the actual leaf is done.
1164 *
1165 * Some paths want to call this instead of allocating a path structure
1166 * and calling ocfs2_find_path().
1167 *
1168 * This function doesn't handle non btree extent lists.
1169 */
Mark Fasheh363041a2007-01-17 12:31:35 -08001170int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
1171 u32 cpos, struct buffer_head **leaf_bh)
Mark Fashehdcd05382007-01-16 11:32:23 -08001172{
1173 int ret;
1174 struct buffer_head *bh = NULL;
1175
1176 ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh);
1177 if (ret) {
1178 mlog_errno(ret);
1179 goto out;
1180 }
1181
1182 *leaf_bh = bh;
1183out:
1184 return ret;
1185}
1186
1187/*
1188 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
1189 *
1190 * Basically, we've moved stuff around at the bottom of the tree and
1191 * we need to fix up the extent records above the changes to reflect
1192 * the new changes.
1193 *
1194 * left_rec: the record on the left.
1195 * left_child_el: is the child list pointed to by left_rec
1196 * right_rec: the record to the right of left_rec
1197 * right_child_el: is the child list pointed to by right_rec
1198 *
1199 * By definition, this only works on interior nodes.
1200 */
static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
					  struct ocfs2_extent_list *left_child_el,
					  struct ocfs2_extent_rec *right_rec,
					  struct ocfs2_extent_list *right_child_el)
{
	/* NOTE: left_child_el is currently unused here; it is kept in
	 * the signature to mirror right_child_el for all callers. */
	u32 left_clusters, right_end;

	/*
	 * Interior nodes never have holes. Their cpos is the cpos of
	 * the leftmost record in their child list. Their cluster
	 * count covers the full theoretical range of their child list
	 * - the range between their cpos and the cpos of the record
	 * immediately to their right.
	 */
	left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
	left_clusters -= le32_to_cpu(left_rec->e_cpos);
	left_rec->e_int_clusters = cpu_to_le32(left_clusters);

	/*
	 * Calculate the rightmost cluster count boundary before
	 * moving cpos - we will need to adjust clusters after
	 * updating e_cpos to keep the same highest cluster count.
	 *
	 * (Statement order matters: right_end must be read from
	 * right_rec before right_rec->e_cpos is overwritten below.)
	 */
	right_end = le32_to_cpu(right_rec->e_cpos);
	right_end += le32_to_cpu(right_rec->e_int_clusters);

	/* Shift right_rec's start to the end of left_rec's new range. */
	right_rec->e_cpos = left_rec->e_cpos;
	le32_add_cpu(&right_rec->e_cpos, left_clusters);

	/* Shrink right_rec's cluster count so its end stays at right_end. */
	right_end -= le32_to_cpu(right_rec->e_cpos);
	right_rec->e_int_clusters = cpu_to_le32(right_end);
}
1233
1234/*
1235 * Adjust the adjacent root node records involved in a
1236 * rotation. left_el_blkno is passed in as a key so that we can easily
1237 * find it's index in the root list.
1238 */
1239static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
1240 struct ocfs2_extent_list *left_el,
1241 struct ocfs2_extent_list *right_el,
1242 u64 left_el_blkno)
1243{
1244 int i;
1245
1246 BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
1247 le16_to_cpu(left_el->l_tree_depth));
1248
1249 for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
1250 if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
1251 break;
1252 }
1253
1254 /*
1255 * The path walking code should have never returned a root and
1256 * two paths which are not adjacent.
1257 */
1258 BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));
1259
1260 ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
1261 &root_el->l_recs[i + 1], right_el);
1262}
1263
1264/*
1265 * We've changed a leaf block (in right_path) and need to reflect that
1266 * change back up the subtree.
1267 *
1268 * This happens in multiple places:
1269 * - When we've moved an extent record from the left path leaf to the right
1270 * path leaf to make room for an empty extent in the left path leaf.
1271 * - When our insert into the right path leaf is at the leftmost edge
1272 * and requires an update of the path immediately to it's left. This
1273 * can occur at the end of some types of rotation and appending inserts.
1274 */
1275static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
1276 struct ocfs2_path *left_path,
1277 struct ocfs2_path *right_path,
1278 int subtree_index)
1279{
1280 int ret, i, idx;
1281 struct ocfs2_extent_list *el, *left_el, *right_el;
1282 struct ocfs2_extent_rec *left_rec, *right_rec;
1283 struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
1284
1285 /*
1286 * Update the counts and position values within all the
1287 * interior nodes to reflect the leaf rotation we just did.
1288 *
1289 * The root node is handled below the loop.
1290 *
1291 * We begin the loop with right_el and left_el pointing to the
1292 * leaf lists and work our way up.
1293 *
1294 * NOTE: within this loop, left_el and right_el always refer
1295 * to the *child* lists.
1296 */
1297 left_el = path_leaf_el(left_path);
1298 right_el = path_leaf_el(right_path);
1299 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
1300 mlog(0, "Adjust records at index %u\n", i);
1301
1302 /*
1303 * One nice property of knowing that all of these
1304 * nodes are below the root is that we only deal with
1305 * the leftmost right node record and the rightmost
1306 * left node record.
1307 */
1308 el = left_path->p_node[i].el;
1309 idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
1310 left_rec = &el->l_recs[idx];
1311
1312 el = right_path->p_node[i].el;
1313 right_rec = &el->l_recs[0];
1314
1315 ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
1316 right_el);
1317
1318 ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
1319 if (ret)
1320 mlog_errno(ret);
1321
1322 ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
1323 if (ret)
1324 mlog_errno(ret);
1325
1326 /*
1327 * Setup our list pointers now so that the current
1328 * parents become children in the next iteration.
1329 */
1330 left_el = left_path->p_node[i].el;
1331 right_el = right_path->p_node[i].el;
1332 }
1333
1334 /*
1335 * At the root node, adjust the two adjacent records which
1336 * begin our path to the leaves.
1337 */
1338
1339 el = left_path->p_node[subtree_index].el;
1340 left_el = left_path->p_node[subtree_index + 1].el;
1341 right_el = right_path->p_node[subtree_index + 1].el;
1342
1343 ocfs2_adjust_root_records(el, left_el, right_el,
1344 left_path->p_node[subtree_index + 1].bh->b_blocknr);
1345
1346 root_bh = left_path->p_node[subtree_index].bh;
1347
1348 ret = ocfs2_journal_dirty(handle, root_bh);
1349 if (ret)
1350 mlog_errno(ret);
1351}
1352
1353static int ocfs2_rotate_subtree_right(struct inode *inode,
1354 handle_t *handle,
1355 struct ocfs2_path *left_path,
1356 struct ocfs2_path *right_path,
1357 int subtree_index)
1358{
1359 int ret, i;
1360 struct buffer_head *right_leaf_bh;
1361 struct buffer_head *left_leaf_bh = NULL;
1362 struct buffer_head *root_bh;
1363 struct ocfs2_extent_list *right_el, *left_el;
1364 struct ocfs2_extent_rec move_rec;
1365
1366 left_leaf_bh = path_leaf_bh(left_path);
1367 left_el = path_leaf_el(left_path);
1368
1369 if (left_el->l_next_free_rec != left_el->l_count) {
1370 ocfs2_error(inode->i_sb,
1371 "Inode %llu has non-full interior leaf node %llu"
1372 "(next free = %u)",
1373 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1374 (unsigned long long)left_leaf_bh->b_blocknr,
1375 le16_to_cpu(left_el->l_next_free_rec));
1376 return -EROFS;
1377 }
1378
1379 /*
1380 * This extent block may already have an empty record, so we
1381 * return early if so.
1382 */
1383 if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
1384 return 0;
1385
1386 root_bh = left_path->p_node[subtree_index].bh;
1387 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
1388
1389 ret = ocfs2_journal_access(handle, inode, root_bh,
1390 OCFS2_JOURNAL_ACCESS_WRITE);
1391 if (ret) {
1392 mlog_errno(ret);
1393 goto out;
1394 }
1395
1396 for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
1397 ret = ocfs2_journal_access(handle, inode,
1398 right_path->p_node[i].bh,
1399 OCFS2_JOURNAL_ACCESS_WRITE);
1400 if (ret) {
1401 mlog_errno(ret);
1402 goto out;
1403 }
1404
1405 ret = ocfs2_journal_access(handle, inode,
1406 left_path->p_node[i].bh,
1407 OCFS2_JOURNAL_ACCESS_WRITE);
1408 if (ret) {
1409 mlog_errno(ret);
1410 goto out;
1411 }
1412 }
1413
1414 right_leaf_bh = path_leaf_bh(right_path);
1415 right_el = path_leaf_el(right_path);
1416
1417 /* This is a code error, not a disk corruption. */
1418 mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
1419 "because rightmost leaf block %llu is empty\n",
1420 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1421 (unsigned long long)right_leaf_bh->b_blocknr);
1422
1423 ocfs2_create_empty_extent(right_el);
1424
1425 ret = ocfs2_journal_dirty(handle, right_leaf_bh);
1426 if (ret) {
1427 mlog_errno(ret);
1428 goto out;
1429 }
1430
1431 /* Do the copy now. */
1432 i = le16_to_cpu(left_el->l_next_free_rec) - 1;
1433 move_rec = left_el->l_recs[i];
1434 right_el->l_recs[0] = move_rec;
1435
1436 /*
1437 * Clear out the record we just copied and shift everything
1438 * over, leaving an empty extent in the left leaf.
1439 *
1440 * We temporarily subtract from next_free_rec so that the
1441 * shift will lose the tail record (which is now defunct).
1442 */
1443 le16_add_cpu(&left_el->l_next_free_rec, -1);
1444 ocfs2_shift_records_right(left_el);
1445 memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
1446 le16_add_cpu(&left_el->l_next_free_rec, 1);
1447
1448 ret = ocfs2_journal_dirty(handle, left_leaf_bh);
1449 if (ret) {
1450 mlog_errno(ret);
1451 goto out;
1452 }
1453
1454 ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
1455 subtree_index);
1456
1457out:
1458 return ret;
1459}
1460
1461/*
1462 * Given a full path, determine what cpos value would return us a path
1463 * containing the leaf immediately to the left of the current one.
1464 *
1465 * Will return zero if the path passed in is already the leftmost path.
1466 */
static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
					 struct ocfs2_path *path, u32 *cpos)
{
	int i, j, ret = 0;
	u64 blkno;
	struct ocfs2_extent_list *el;

	/* A depth-0 tree has no "left leaf" concept. */
	BUG_ON(path->p_tree_depth == 0);

	/* *cpos == 0 is the "already leftmost" answer. */
	*cpos = 0;

	blkno = path_leaf_bh(path)->b_blocknr;

	/* Start at the tree node just above the leaf and work our way up. */
	i = path->p_tree_depth - 1;
	while (i >= 0) {
		el = path->p_node[i].el;

		/*
		 * Find the extent record just before the one in our
		 * path.
		 */
		for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
			if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
				if (j == 0) {
					if (i == 0) {
						/*
						 * We've determined that the
						 * path specified is already
						 * the leftmost one - return a
						 * cpos of zero.
						 */
						goto out;
					}
					/*
					 * The leftmost record points to our
					 * leaf - we need to travel up the
					 * tree one level.
					 */
					goto next_node;
				}

				/*
				 * The answer is the last cluster covered
				 * by the record to our left: its cpos
				 * plus its cluster count, minus one.
				 */
				*cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
				*cpos = *cpos + ocfs2_rec_clusters(el,
							   &el->l_recs[j - 1]);
				*cpos = *cpos - 1;
				goto out;
			}
		}

		/*
		 * If we got here, we never found a valid node where
		 * the tree indicated one should be.
		 */
		ocfs2_error(sb,
			    "Invalid extent tree at extent block %llu\n",
			    (unsigned long long)blkno);
		ret = -EROFS;
		goto out;

next_node:
		/* Search for the current node's block one level higher. */
		blkno = path->p_node[i].bh->b_blocknr;
		i--;
	}

out:
	return ret;
}
1535
1536static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
1537 struct ocfs2_path *path)
1538{
1539 int credits = (path->p_tree_depth - subtree_depth) * 2 + 1;
1540
1541 if (handle->h_buffer_credits < credits)
1542 return ocfs2_extend_trans(handle, credits);
1543
1544 return 0;
1545}
1546
1547/*
1548 * Trap the case where we're inserting into the theoretical range past
1549 * the _actual_ left leaf range. Otherwise, we'll rotate a record
1550 * whose cpos is less than ours into the right leaf.
1551 *
1552 * It's only necessary to look at the rightmost record of the left
1553 * leaf because the logic that calls us should ensure that the
1554 * theoretical ranges in the path components above the leaves are
1555 * correct.
1556 */
1557static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
1558 u32 insert_cpos)
1559{
1560 struct ocfs2_extent_list *left_el;
1561 struct ocfs2_extent_rec *rec;
1562 int next_free;
1563
1564 left_el = path_leaf_el(left_path);
1565 next_free = le16_to_cpu(left_el->l_next_free_rec);
1566 rec = &left_el->l_recs[next_free - 1];
1567
1568 if (insert_cpos > le32_to_cpu(rec->e_cpos))
1569 return 1;
1570 return 0;
1571}
1572
1573/*
1574 * Rotate all the records in a btree right one record, starting at insert_cpos.
1575 *
1576 * The path to the rightmost leaf should be passed in.
1577 *
1578 * The array is assumed to be large enough to hold an entire path (tree depth).
1579 *
1580 * Upon succesful return from this function:
1581 *
1582 * - The 'right_path' array will contain a path to the leaf block
1583 * whose range contains e_cpos.
1584 * - That leaf block will have a single empty extent in list index 0.
1585 * - In the case that the rotation requires a post-insert update,
1586 * *ret_left_path will contain a valid path which can be passed to
1587 * ocfs2_insert_path().
1588 */
static int ocfs2_rotate_tree_right(struct inode *inode,
				   handle_t *handle,
				   u32 insert_cpos,
				   struct ocfs2_path *right_path,
				   struct ocfs2_path **ret_left_path)
{
	int ret, start;
	u32 cpos;
	struct ocfs2_path *left_path = NULL;

	*ret_left_path = NULL;

	/* The left path shares the same root as the right path. */
	left_path = ocfs2_new_path(path_root_bh(right_path),
				   path_root_el(right_path));
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos);

	/*
	 * What we want to do here is:
	 *
	 * 1) Start with the rightmost path.
	 *
	 * 2) Determine a path to the leaf block directly to the left
	 *    of that leaf.
	 *
	 * 3) Determine the 'subtree root' - the lowest level tree node
	 *    which contains a path to both leaves.
	 *
	 * 4) Rotate the subtree.
	 *
	 * 5) Find the next subtree by considering the left path to be
	 *    the new right path.
	 *
	 * The check at the top of this while loop also accepts
	 * insert_cpos == cpos because cpos is only a _theoretical_
	 * value to get us the left path - insert_cpos might very well
	 * be filling that hole.
	 *
	 * Stop at a cpos of '0' because we either started at the
	 * leftmost branch (i.e., a tree with one branch and a
	 * rotation inside of it), or we've gone as far as we can in
	 * rotating subtrees.
	 */
	while (cpos && insert_cpos <= cpos) {
		mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
		     insert_cpos, cpos);

		ret = ocfs2_find_path(inode, left_path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		mlog_bug_on_msg(path_leaf_bh(left_path) ==
				path_leaf_bh(right_path),
				"Inode %lu: error during insert of %u "
				"(left path cpos %u) results in two identical "
				"paths ending at %llu\n",
				inode->i_ino, insert_cpos, cpos,
				(unsigned long long)
				path_leaf_bh(left_path)->b_blocknr);

		if (ocfs2_rotate_requires_path_adjustment(left_path,
							  insert_cpos)) {
			mlog(0, "Path adjustment required\n");

			/*
			 * We've rotated the tree as much as we
			 * should. The rest is up to
			 * ocfs2_insert_path() to complete, after the
			 * record insertion. We indicate this
			 * situation by returning the left path.
			 *
			 * The reason we don't adjust the records here
			 * before the record insert is that an error
			 * later might break the rule where a parent
			 * record e_cpos will reflect the actual
			 * e_cpos of the 1st nonempty record of the
			 * child list.
			 */
			*ret_left_path = left_path;
			/*
			 * Ownership of left_path transfers to the
			 * caller: jump past the free at 'out'.
			 */
			goto out_ret_path;
		}

		start = ocfs2_find_subtree_root(inode, left_path, right_path);

		mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
		     start,
		     (unsigned long long) right_path->p_node[start].bh->b_blocknr,
		     right_path->p_tree_depth);

		/* Each subtree rotation may need fresh journal credits. */
		ret = ocfs2_extend_rotate_transaction(handle, start,
						      right_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_subtree_right(inode, handle, left_path,
						 right_path, start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * There is no need to re-read the next right path
		 * as we know that it'll be our current left
		 * path. Optimize by copying values instead.
		 */
		ocfs2_mv_path(right_path, left_path);

		ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
						    &cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	ocfs2_free_path(left_path);

out_ret_path:
	return ret;
}
1726
1727/*
1728 * Do the final bits of extent record insertion at the target leaf
1729 * list. If this leaf is part of an allocation tree, it is assumed
1730 * that the tree above has been prepared.
1731 */
static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_insert_type *insert,
				 struct inode *inode)
{
	int i = insert->ins_contig_index;
	unsigned int range;
	struct ocfs2_extent_rec *rec;

	/* Leaf-only operation; interior lists must never reach here. */
	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	/*
	 * Contiguous insert - either left or right. The existing
	 * record absorbs insert_rec's clusters; for a left-contig
	 * merge the record's start block and cpos move down to
	 * insert_rec's.
	 */
	if (insert->ins_contig != CONTIG_NONE) {
		rec = &el->l_recs[i];
		if (insert->ins_contig == CONTIG_LEFT) {
			rec->e_blkno = insert_rec->e_blkno;
			rec->e_cpos = insert_rec->e_cpos;
		}
		le16_add_cpu(&rec->e_leaf_clusters,
			     le16_to_cpu(insert_rec->e_leaf_clusters));
		return;
	}

	/*
	 * Handle insert into an empty leaf.
	 */
	if (le16_to_cpu(el->l_next_free_rec) == 0 ||
	    ((le16_to_cpu(el->l_next_free_rec) == 1) &&
	     ocfs2_is_empty_extent(&el->l_recs[0]))) {
		el->l_recs[0] = *insert_rec;
		el->l_next_free_rec = cpu_to_le16(1);
		return;
	}

	/*
	 * Appending insert.
	 */
	if (insert->ins_appending == APPEND_TAIL) {
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		rec = &el->l_recs[i];
		range = le32_to_cpu(rec->e_cpos)
			+ le16_to_cpu(rec->e_leaf_clusters);
		/* An append must land at or past the current tail's end. */
		BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);

		mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
				le16_to_cpu(el->l_count),
				"inode %lu, depth %u, count %u, next free %u, "
				"rec.cpos %u, rec.clusters %u, "
				"insert.cpos %u, insert.clusters %u\n",
				inode->i_ino,
				le16_to_cpu(el->l_tree_depth),
				le16_to_cpu(el->l_count),
				le16_to_cpu(el->l_next_free_rec),
				le32_to_cpu(el->l_recs[i].e_cpos),
				le16_to_cpu(el->l_recs[i].e_leaf_clusters),
				le32_to_cpu(insert_rec->e_cpos),
				le16_to_cpu(insert_rec->e_leaf_clusters));
		i++;
		el->l_recs[i] = *insert_rec;
		le16_add_cpu(&el->l_next_free_rec, 1);
		return;
	}

	/*
	 * Ok, we have to rotate.
	 *
	 * At this point, it is safe to assume that inserting into an
	 * empty leaf and appending to a leaf have both been handled
	 * above.
	 *
	 * This leaf needs to have space, either by the empty 1st
	 * extent record, or by virtue of an l_next_rec < l_count.
	 */
	ocfs2_rotate_leaf(el, insert_rec);
}
1809
1810static inline void ocfs2_update_dinode_clusters(struct inode *inode,
1811 struct ocfs2_dinode *di,
1812 u32 clusters)
1813{
1814 le32_add_cpu(&di->i_clusters, clusters);
1815 spin_lock(&OCFS2_I(inode)->ip_lock);
1816 OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters);
1817 spin_unlock(&OCFS2_I(inode)->ip_lock);
1818}
1819
static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
				    struct ocfs2_extent_rec *insert_rec,
				    struct ocfs2_path *right_path,
				    struct ocfs2_path **ret_left_path)
{
	int ret, i, next_free;
	struct buffer_head *bh;
	struct ocfs2_extent_list *el;
	struct ocfs2_path *left_path = NULL;

	*ret_left_path = NULL;

	/*
	 * This shouldn't happen for non-trees. The extent rec cluster
	 * count manipulation below only works for interior nodes.
	 */
	BUG_ON(right_path->p_tree_depth == 0);

	/*
	 * If our appending insert is at the leftmost edge of a leaf,
	 * then we might need to update the rightmost records of the
	 * neighboring path.
	 */
	el = path_leaf_el(right_path);
	next_free = le16_to_cpu(el->l_next_free_rec);
	if (next_free == 0 ||
	    (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
		u32 left_cpos;

		ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
						    &left_cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		mlog(0, "Append may need a left path update. cpos: %u, "
		     "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos),
		     left_cpos);

		/*
		 * No need to worry if the append is already in the
		 * leftmost leaf.
		 */
		if (left_cpos) {
			left_path = ocfs2_new_path(path_root_bh(right_path),
						   path_root_el(right_path));
			if (!left_path) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}

			ret = ocfs2_find_path(inode, left_path, left_cpos);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * ocfs2_insert_path() will pass the left_path to the
			 * journal for us.
			 */
		}
	}

	ret = ocfs2_journal_access_path(inode, handle, right_path);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Walk down the right path, widening each interior node's
	 * rightmost record so its theoretical range covers the
	 * appended extent.
	 */
	el = path_root_el(right_path);
	bh = path_root_bh(right_path);
	i = 0;
	while (1) {
		struct ocfs2_extent_rec *rec;

		next_free = le16_to_cpu(el->l_next_free_rec);
		if (next_free == 0) {
			ocfs2_error(inode->i_sb,
				    "Dinode %llu has a bad extent list",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno);
			ret = -EIO;
			goto out;
		}

		/*
		 * New interior cluster count = end of appended extent
		 * (insert cpos + leaf clusters) minus the record's cpos.
		 */
		rec = &el->l_recs[next_free - 1];

		rec->e_int_clusters = insert_rec->e_cpos;
		le32_add_cpu(&rec->e_int_clusters,
			     le16_to_cpu(insert_rec->e_leaf_clusters));
		le32_add_cpu(&rec->e_int_clusters,
			     -le32_to_cpu(rec->e_cpos));

		ret = ocfs2_journal_dirty(handle, bh);
		if (ret)
			mlog_errno(ret);

		/* Don't touch the leaf node */
		if (++i >= right_path->p_tree_depth)
			break;

		bh = right_path->p_node[i].bh;
		el = right_path->p_node[i].el;
	}

	/* Success: hand any allocated left_path back to the caller. */
	*ret_left_path = left_path;
	ret = 0;
out:
	if (ret != 0)
		ocfs2_free_path(left_path);

	return ret;
}
1935
1936/*
1937 * This function only does inserts on an allocation b-tree. For dinode
1938 * lists, ocfs2_insert_at_leaf() is called directly.
1939 *
1940 * right_path is the path we want to do the actual insert
1941 * in. left_path should only be passed in if we need to update that
1942 * portion of the tree after an edge insert.
1943 */
static int ocfs2_insert_path(struct inode *inode,
			     handle_t *handle,
			     struct ocfs2_path *left_path,
			     struct ocfs2_path *right_path,
			     struct ocfs2_extent_rec *insert_rec,
			     struct ocfs2_insert_type *insert)
{
	int ret, subtree_index;
	struct buffer_head *leaf_bh = path_leaf_bh(right_path);
	struct ocfs2_extent_list *el;

	/*
	 * Pass both paths to the journal. The majority of inserts
	 * will be touching all components anyway.
	 */
	ret = ocfs2_journal_access_path(inode, handle, right_path);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (left_path) {
		int credits = handle->h_buffer_credits;

		/*
		 * There's a chance that left_path got passed back to
		 * us without being accounted for in the
		 * journal. Extend our transaction here to be sure we
		 * can change those blocks.
		 */
		credits += left_path->p_tree_depth;

		ret = ocfs2_extend_trans(handle, credits);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_journal_access_path(inode, handle, left_path);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* The actual record insert happens at the right path's leaf. */
	el = path_leaf_el(right_path);

	ocfs2_insert_at_leaf(insert_rec, el, insert, inode);
	ret = ocfs2_journal_dirty(handle, leaf_bh);
	if (ret)
		mlog_errno(ret);

	if (left_path) {
		/*
		 * The rotate code has indicated that we need to fix
		 * up portions of the tree after the insert.
		 *
		 * XXX: Should we extend the transaction here?
		 */
		subtree_index = ocfs2_find_subtree_root(inode, left_path,
							right_path);
		ocfs2_complete_edge_insert(inode, handle, left_path,
					   right_path, subtree_index);
	}

	ret = 0;
out:
	return ret;
}
2013
static int ocfs2_do_insert_extent(struct inode *inode,
				  handle_t *handle,
				  struct buffer_head *di_bh,
				  struct ocfs2_extent_rec *insert_rec,
				  struct ocfs2_insert_type *type)
{
	int ret, rotate = 0;
	u32 cpos;
	struct ocfs2_path *right_path = NULL;
	struct ocfs2_path *left_path = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_extent_list *el;

	di = (struct ocfs2_dinode *) di_bh->b_data;
	el = &di->id2.i_list;

	ret = ocfs2_journal_access(handle, inode, di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Depth-zero tree: the dinode's own list is the leaf, so
	 * insert directly and skip all path handling.
	 */
	if (le16_to_cpu(el->l_tree_depth) == 0) {
		ocfs2_insert_at_leaf(insert_rec, el, type, inode);
		goto out_update_clusters;
	}

	right_path = ocfs2_new_inode_path(di_bh);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Determine the path to start with. Rotations need the
	 * rightmost path, everything else can go directly to the
	 * target leaf.
	 */
	cpos = le32_to_cpu(insert_rec->e_cpos);
	if (type->ins_appending == APPEND_NONE &&
	    type->ins_contig == CONTIG_NONE) {
		rotate = 1;
		/* UINT_MAX cpos resolves to the rightmost path. */
		cpos = UINT_MAX;
	}

	ret = ocfs2_find_path(inode, right_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Rotations and appends need special treatment - they modify
	 * parts of the tree's above them.
	 *
	 * Both might pass back a path immediate to the left of the
	 * one being inserted to. This will be cause
	 * ocfs2_insert_path() to modify the rightmost records of
	 * left_path to account for an edge insert.
	 *
	 * XXX: When modifying this code, keep in mind that an insert
	 * can wind up skipping both of these two special cases...
	 */
	if (rotate) {
		ret = ocfs2_rotate_tree_right(inode, handle,
					      le32_to_cpu(insert_rec->e_cpos),
					      right_path, &left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else if (type->ins_appending == APPEND_TAIL
		   && type->ins_contig != CONTIG_LEFT) {
		ret = ocfs2_append_rec_to_path(inode, handle, insert_rec,
					       right_path, &left_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_insert_path(inode, handle, left_path, right_path,
				insert_rec, type);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

out_update_clusters:
	/* Account the new clusters on disk and in memory, then dirty. */
	ocfs2_update_dinode_clusters(inode, di,
				     le16_to_cpu(insert_rec->e_leaf_clusters));

	ret = ocfs2_journal_dirty(handle, di_bh);
	if (ret)
		mlog_errno(ret);

out:
	ocfs2_free_path(left_path);
	ocfs2_free_path(right_path);

	return ret;
}
2118
2119static void ocfs2_figure_contig_type(struct inode *inode,
2120 struct ocfs2_insert_type *insert,
2121 struct ocfs2_extent_list *el,
2122 struct ocfs2_extent_rec *insert_rec)
2123{
2124 int i;
2125 enum ocfs2_contig_type contig_type = CONTIG_NONE;
2126
Mark Fashehe48edee2007-03-07 16:46:57 -08002127 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
2128
Mark Fashehdcd05382007-01-16 11:32:23 -08002129 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2130 contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
2131 insert_rec);
2132 if (contig_type != CONTIG_NONE) {
2133 insert->ins_contig_index = i;
2134 break;
2135 }
2136 }
2137 insert->ins_contig = contig_type;
2138}
2139
2140/*
 * This should only be called against the rightmost leaf extent list.
2142 *
2143 * ocfs2_figure_appending_type() will figure out whether we'll have to
2144 * insert at the tail of the rightmost leaf.
2145 *
 * This should also work against the dinode list for trees with 0
2147 * depth. If we consider the dinode list to be the rightmost leaf node
2148 * then the logic here makes sense.
2149 */
2150static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
2151 struct ocfs2_extent_list *el,
2152 struct ocfs2_extent_rec *insert_rec)
2153{
2154 int i;
2155 u32 cpos = le32_to_cpu(insert_rec->e_cpos);
2156 struct ocfs2_extent_rec *rec;
2157
2158 insert->ins_appending = APPEND_NONE;
2159
Mark Fashehe48edee2007-03-07 16:46:57 -08002160 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
Mark Fashehdcd05382007-01-16 11:32:23 -08002161
2162 if (!el->l_next_free_rec)
2163 goto set_tail_append;
2164
2165 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
2166 /* Were all records empty? */
2167 if (le16_to_cpu(el->l_next_free_rec) == 1)
2168 goto set_tail_append;
2169 }
2170
2171 i = le16_to_cpu(el->l_next_free_rec) - 1;
2172 rec = &el->l_recs[i];
2173
Mark Fashehe48edee2007-03-07 16:46:57 -08002174 if (cpos >=
2175 (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
Mark Fashehdcd05382007-01-16 11:32:23 -08002176 goto set_tail_append;
2177
2178 return;
2179
2180set_tail_append:
2181 insert->ins_appending = APPEND_TAIL;
2182}
2183
2184/*
 * Helper function called at the beginning of an insert.
2186 *
2187 * This computes a few things that are commonly used in the process of
2188 * inserting into the btree:
2189 * - Whether the new extent is contiguous with an existing one.
2190 * - The current tree depth.
2191 * - Whether the insert is an appending one.
2192 * - The total # of free records in the tree.
2193 *
2194 * All of the information is stored on the ocfs2_insert_type
2195 * structure.
2196 */
2197static int ocfs2_figure_insert_type(struct inode *inode,
2198 struct buffer_head *di_bh,
2199 struct buffer_head **last_eb_bh,
2200 struct ocfs2_extent_rec *insert_rec,
2201 struct ocfs2_insert_type *insert)
2202{
2203 int ret;
2204 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2205 struct ocfs2_extent_block *eb;
2206 struct ocfs2_extent_list *el;
2207 struct ocfs2_path *path = NULL;
2208 struct buffer_head *bh = NULL;
2209
2210 el = &di->id2.i_list;
2211 insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
2212
2213 if (el->l_tree_depth) {
2214 /*
2215 * If we have tree depth, we read in the
2216 * rightmost extent block ahead of time as
2217 * ocfs2_figure_insert_type() and ocfs2_add_branch()
2218 * may want it later.
2219 */
2220 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
2221 le64_to_cpu(di->i_last_eb_blk), &bh,
2222 OCFS2_BH_CACHED, inode);
2223 if (ret) {
2224 mlog_exit(ret);
2225 goto out;
2226 }
2227 eb = (struct ocfs2_extent_block *) bh->b_data;
2228 el = &eb->h_list;
2229 }
2230
2231 /*
2232 * Unless we have a contiguous insert, we'll need to know if
2233 * there is room left in our allocation tree for another
2234 * extent record.
2235 *
2236 * XXX: This test is simplistic, we can search for empty
2237 * extent records too.
2238 */
2239 insert->ins_free_records = le16_to_cpu(el->l_count) -
2240 le16_to_cpu(el->l_next_free_rec);
2241
2242 if (!insert->ins_tree_depth) {
2243 ocfs2_figure_contig_type(inode, insert, el, insert_rec);
2244 ocfs2_figure_appending_type(insert, el, insert_rec);
2245 return 0;
2246 }
2247
2248 path = ocfs2_new_inode_path(di_bh);
2249 if (!path) {
2250 ret = -ENOMEM;
2251 mlog_errno(ret);
2252 goto out;
2253 }
2254
2255 /*
2256 * In the case that we're inserting past what the tree
2257 * currently accounts for, ocfs2_find_path() will return for
2258 * us the rightmost tree path. This is accounted for below in
2259 * the appending code.
2260 */
2261 ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos));
2262 if (ret) {
2263 mlog_errno(ret);
2264 goto out;
2265 }
2266
2267 el = path_leaf_el(path);
2268
2269 /*
2270 * Now that we have the path, there's two things we want to determine:
2271 * 1) Contiguousness (also set contig_index if this is so)
2272 *
2273 * 2) Are we doing an append? We can trivially break this up
2274 * into two types of appends: simple record append, or a
2275 * rotate inside the tail leaf.
2276 */
2277 ocfs2_figure_contig_type(inode, insert, el, insert_rec);
2278
2279 /*
2280 * The insert code isn't quite ready to deal with all cases of
2281 * left contiguousness. Specifically, if it's an insert into
2282 * the 1st record in a leaf, it will require the adjustment of
Mark Fashehe48edee2007-03-07 16:46:57 -08002283 * cluster count on the last record of the path directly to it's
Mark Fashehdcd05382007-01-16 11:32:23 -08002284 * left. For now, just catch that case and fool the layers
2285 * above us. This works just fine for tree_depth == 0, which
2286 * is why we allow that above.
2287 */
2288 if (insert->ins_contig == CONTIG_LEFT &&
2289 insert->ins_contig_index == 0)
2290 insert->ins_contig = CONTIG_NONE;
2291
2292 /*
2293 * Ok, so we can simply compare against last_eb to figure out
2294 * whether the path doesn't exist. This will only happen in
2295 * the case that we're doing a tail append, so maybe we can
2296 * take advantage of that information somehow.
2297 */
2298 if (le64_to_cpu(di->i_last_eb_blk) == path_leaf_bh(path)->b_blocknr) {
2299 /*
2300 * Ok, ocfs2_find_path() returned us the rightmost
2301 * tree path. This might be an appending insert. There are
2302 * two cases:
2303 * 1) We're doing a true append at the tail:
2304 * -This might even be off the end of the leaf
2305 * 2) We're "appending" by rotating in the tail
2306 */
2307 ocfs2_figure_appending_type(insert, el, insert_rec);
2308 }
2309
2310out:
2311 ocfs2_free_path(path);
2312
2313 if (ret == 0)
2314 *last_eb_bh = bh;
2315 else
2316 brelse(bh);
2317 return ret;
2318}
2319
2320/*
2321 * Insert an extent into an inode btree.
2322 *
2323 * The caller needs to update fe->i_clusters
2324 */
int ocfs2_insert_extent(struct ocfs2_super *osb,
			handle_t *handle,
			struct inode *inode,
			struct buffer_head *fe_bh,
			u32 cpos,
			u64 start_blk,
			u32 new_clusters,
			struct ocfs2_alloc_context *meta_ac)
{
	int status, shift;
	struct buffer_head *last_eb_bh = NULL;
	struct buffer_head *bh = NULL;
	struct ocfs2_insert_type insert = {0, };
	struct ocfs2_extent_rec rec;

	mlog(0, "add %u clusters at position %u to inode %llu\n",
	     new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* Without sparse allocation support, inserts must land exactly
	 * at the current end of the allocation. */
	mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
			(OCFS2_I(inode)->ip_clusters != cpos),
			"Device %s, asking for sparse allocation: inode %llu, "
			"cpos %u, clusters %u\n",
			osb->dev_str,
			(unsigned long long)OCFS2_I(inode)->ip_blkno, cpos,
			OCFS2_I(inode)->ip_clusters);

	/* Build the new extent record from the caller's parameters. */
	memset(&rec, 0, sizeof(rec));
	rec.e_cpos = cpu_to_le32(cpos);
	rec.e_blkno = cpu_to_le64(start_blk);
	rec.e_leaf_clusters = cpu_to_le16(new_clusters);

	/* Classify the insert (contig/append/depth/free records) and
	 * pick up the rightmost extent block in last_eb_bh. */
	status = ocfs2_figure_insert_type(inode, fe_bh, &last_eb_bh, &rec,
					  &insert);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
	     "Insert.contig_index: %d, Insert.free_records: %d, "
	     "Insert.tree_depth: %d\n",
	     insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
	     insert.ins_free_records, insert.ins_tree_depth);

	/*
	 * Avoid growing the tree unless we're out of records and the
	 * insert type requires one.
	 */
	if (insert.ins_contig != CONTIG_NONE || insert.ins_free_records)
		goto out_add;

	/* No room anywhere: find the lowest level with a free slot. */
	shift = ocfs2_find_branch_target(osb, inode, fe_bh, &bh);
	if (shift < 0) {
		status = shift;
		mlog_errno(status);
		goto bail;
	}

	/* We traveled all the way to the bottom of the allocation tree
	 * and didn't find room for any more extents - we need to add
	 * another tree level */
	if (shift) {
		BUG_ON(bh);
		mlog(0, "need to shift tree depth "
		     "(current = %d)\n", insert.ins_tree_depth);

		/* ocfs2_shift_tree_depth will return us a buffer with
		 * the new extent block (so we can pass that to
		 * ocfs2_add_branch). */
		status = ocfs2_shift_tree_depth(osb, handle, inode, fe_bh,
						meta_ac, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		insert.ins_tree_depth++;
		/* Special case: we have room now if we shifted from
		 * tree_depth 0 */
		if (insert.ins_tree_depth == 1)
			goto out_add;
	}

	/* call ocfs2_add_branch to add the final part of the tree with
	 * the new data. */
	mlog(0, "add branch. bh = %p\n", bh);
	status = ocfs2_add_branch(osb, handle, inode, fe_bh, bh, last_eb_bh,
				  meta_ac);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

out_add:
	/* Finally, we can add clusters. This might rotate the tree for us. */
	status = ocfs2_do_insert_extent(inode, handle, fe_bh, &rec, &insert);
	if (status < 0)
		mlog_errno(status);
	else
		/* Keep the in-memory extent map in sync with the tree. */
		ocfs2_extent_map_insert_rec(inode, &rec);

bail:
	if (bh)
		brelse(bh);

	if (last_eb_bh)
		brelse(last_eb_bh);

	mlog_exit(status);
	return status;
}
2435
2436static inline int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
2437{
2438 struct buffer_head *tl_bh = osb->osb_tl_bh;
2439 struct ocfs2_dinode *di;
2440 struct ocfs2_truncate_log *tl;
2441
2442 di = (struct ocfs2_dinode *) tl_bh->b_data;
2443 tl = &di->id2.i_dealloc;
2444
2445 mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
2446 "slot %d, invalid truncate log parameters: used = "
2447 "%u, count = %u\n", osb->slot_num,
2448 le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
2449 return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
2450}
2451
2452static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
2453 unsigned int new_start)
2454{
2455 unsigned int tail_index;
2456 unsigned int current_tail;
2457
2458 /* No records, nothing to coalesce */
2459 if (!le16_to_cpu(tl->tl_used))
2460 return 0;
2461
2462 tail_index = le16_to_cpu(tl->tl_used) - 1;
2463 current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
2464 current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);
2465
2466 return current_tail == new_start;
2467}
2468
/*
 * Record a range of clusters in the truncate log for deferred
 * freeing, coalescing with the tail record when the new range
 * directly follows it.
 *
 * The caller must hold tl_inode->i_mutex and pass a started handle.
 * Returns -ENOSPC if the log is already full - the caller should
 * have flushed it first.
 */
static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
				     handle_t *handle,
				     u64 start_blk,
				     unsigned int num_clusters)
{
	int status, index;
	unsigned int start_cluster, tl_count;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	mlog_entry("start_blk = %llu, num_clusters = %u\n",
		   (unsigned long long)start_blk, num_clusters);

	/* Assert that the caller already holds i_mutex - a successful
	 * trylock here would mean it was not held. */
	BUG_ON(mutex_trylock(&tl_inode->i_mutex));

	start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	if (!OCFS2_IS_VALID_DINODE(di)) {
		OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
		status = -EIO;
		goto bail;
	}

	/* Sanity check the on-disk record count against the format. */
	tl_count = le16_to_cpu(tl->tl_count);
	mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
			tl_count == 0,
			"Truncate record count on #%llu invalid "
			"wanted %u, actual %u\n",
			(unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
			ocfs2_truncate_recs_per_inode(osb->sb),
			le16_to_cpu(tl->tl_count));

	/* Caller should have known to flush before calling us. */
	index = le16_to_cpu(tl->tl_used);
	if (index >= tl_count) {
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access(handle, tl_inode, tl_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Log truncate of %u clusters starting at cluster %u to "
	     "%llu (index = %d)\n", num_clusters, start_cluster,
	     (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index);

	if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
		/*
		 * Move index back to the record we are coalescing with.
		 * ocfs2_truncate_log_can_coalesce() guarantees nonzero
		 */
		index--;

		/* Only the cluster count of the tail record grows. */
		num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
		mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",
		     index, le32_to_cpu(tl->tl_recs[index].t_start),
		     num_clusters);
	} else {
		/* Start a fresh record at the current tail. */
		tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
		tl->tl_used = cpu_to_le16(index + 1);
	}
	tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);

	status = ocfs2_journal_dirty(handle, tl_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}
2551
/*
 * Walk the truncate log from the newest record down, freeing each
 * logged range against the global bitmap and shrinking tl_used as we
 * go. Called under tl_inode->i_mutex with a started handle.
 */
static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
					 handle_t *handle,
					 struct inode *data_alloc_inode,
					 struct buffer_head *data_alloc_bh)
{
	int status = 0;
	int i;
	unsigned int num_clusters;
	u64 start_blk;
	struct ocfs2_truncate_rec rec;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct buffer_head *tl_bh = osb->osb_tl_bh;

	mlog_entry_void();

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	i = le16_to_cpu(tl->tl_used) - 1;
	while (i >= 0) {
		/* Caller has given us at least enough credits to
		 * update the truncate log dinode */
		status = ocfs2_journal_access(handle, tl_inode, tl_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		/* Drop the record from the log before freeing its
		 * clusters. NOTE(review): ordering looks deliberate so
		 * a crash cannot leave a freed range still logged -
		 * confirm against journal recovery semantics. */
		tl->tl_used = cpu_to_le16(i);

		status = ocfs2_journal_dirty(handle, tl_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		/* TODO: Perhaps we can calculate the bulk of the
		 * credits up front rather than extending like
		 * this. */
		status = ocfs2_extend_trans(handle,
					    OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		rec = tl->tl_recs[i];
		start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
						     le32_to_cpu(rec.t_start));
		num_clusters = le32_to_cpu(rec.t_clusters);

		/* if start_blk is not set, we ignore the record as
		 * invalid. */
		if (start_blk) {
			mlog(0, "free record %d, start = %u, clusters = %u\n",
			     i, le32_to_cpu(rec.t_start), num_clusters);

			status = ocfs2_free_clusters(handle, data_alloc_inode,
						     data_alloc_bh, start_blk,
						     num_clusters);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		i--;
	}

bail:
	mlog_exit(status);
	return status;
}
2626
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08002627/* Expects you to already be holding tl_inode->i_mutex */
static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
	int status;
	unsigned int num_to_flush;
	handle_t *handle;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *data_alloc_inode = NULL;
	struct buffer_head *tl_bh = osb->osb_tl_bh;
	struct buffer_head *data_alloc_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	mlog_entry_void();

	/* Assert that the caller holds tl_inode->i_mutex (see comment
	 * above) - a successful trylock would mean it was not held. */
	BUG_ON(mutex_trylock(&tl_inode->i_mutex));

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	if (!OCFS2_IS_VALID_DINODE(di)) {
		OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
		status = -EIO;
		goto out;
	}

	/* Empty log: nothing to do. */
	num_to_flush = le16_to_cpu(tl->tl_used);
	mlog(0, "Flush %u records from truncate log #%llu\n",
	     num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
	if (!num_to_flush) {
		status = 0;
		goto out;
	}

	/* Freed clusters are returned to the global bitmap. */
	data_alloc_inode = ocfs2_get_system_file_inode(osb,
						       GLOBAL_BITMAP_SYSTEM_INODE,
						       OCFS2_INVALID_SLOT);
	if (!data_alloc_inode) {
		status = -EINVAL;
		mlog(ML_ERROR, "Could not get bitmap inode!\n");
		goto out;
	}

	/* Lock order: i_mutex, then the cluster meta lock, then the
	 * transaction - unwound in reverse below. */
	mutex_lock(&data_alloc_inode->i_mutex);

	status = ocfs2_meta_lock(data_alloc_inode, &data_alloc_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_unlock;
	}

	status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
					       data_alloc_bh);
	if (status < 0)
		mlog_errno(status);

	/* Commit even on error - replay may have partially succeeded. */
	ocfs2_commit_trans(osb, handle);

out_unlock:
	brelse(data_alloc_bh);
	ocfs2_meta_unlock(data_alloc_inode, 1);

out_mutex:
	mutex_unlock(&data_alloc_inode->i_mutex);
	iput(data_alloc_inode);

out:
	mlog_exit(status);
	return status;
}
2703
2704int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
2705{
2706 int status;
2707 struct inode *tl_inode = osb->osb_tl_inode;
2708
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08002709 mutex_lock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08002710 status = __ocfs2_flush_truncate_log(osb);
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08002711 mutex_unlock(&tl_inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08002712
2713 return status;
2714}
2715
David Howellsc4028952006-11-22 14:57:56 +00002716static void ocfs2_truncate_log_worker(struct work_struct *work)
Mark Fashehccd979b2005-12-15 14:31:24 -08002717{
2718 int status;
David Howellsc4028952006-11-22 14:57:56 +00002719 struct ocfs2_super *osb =
2720 container_of(work, struct ocfs2_super,
2721 osb_truncate_log_wq.work);
Mark Fashehccd979b2005-12-15 14:31:24 -08002722
2723 mlog_entry_void();
2724
2725 status = ocfs2_flush_truncate_log(osb);
2726 if (status < 0)
2727 mlog_errno(status);
2728
2729 mlog_exit(status);
2730}
2731
2732#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
2733void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
2734 int cancel)
2735{
2736 if (osb->osb_tl_inode) {
2737 /* We want to push off log flushes while truncates are
2738 * still running. */
2739 if (cancel)
2740 cancel_delayed_work(&osb->osb_truncate_log_wq);
2741
2742 queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
2743 OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
2744 }
2745}
2746
2747static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
2748 int slot_num,
2749 struct inode **tl_inode,
2750 struct buffer_head **tl_bh)
2751{
2752 int status;
2753 struct inode *inode = NULL;
2754 struct buffer_head *bh = NULL;
2755
2756 inode = ocfs2_get_system_file_inode(osb,
2757 TRUNCATE_LOG_SYSTEM_INODE,
2758 slot_num);
2759 if (!inode) {
2760 status = -EINVAL;
2761 mlog(ML_ERROR, "Could not get load truncate log inode!\n");
2762 goto bail;
2763 }
2764
2765 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
2766 OCFS2_BH_CACHED, inode);
2767 if (status < 0) {
2768 iput(inode);
2769 mlog_errno(status);
2770 goto bail;
2771 }
2772
2773 *tl_inode = inode;
2774 *tl_bh = bh;
2775bail:
2776 mlog_exit(status);
2777 return status;
2778}
2779
2780/* called during the 1st stage of node recovery. we stamp a clean
2781 * truncate log and pass back a copy for processing later. if the
2782 * truncate log does not require processing, a *tl_copy is set to
2783 * NULL. */
int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
				      int slot_num,
				      struct ocfs2_dinode **tl_copy)
{
	int status;
	struct inode *tl_inode = NULL;
	struct buffer_head *tl_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_truncate_log *tl;

	*tl_copy = NULL;

	mlog(0, "recover truncate log from slot %d\n", slot_num);

	status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	di = (struct ocfs2_dinode *) tl_bh->b_data;
	tl = &di->id2.i_dealloc;
	if (!OCFS2_IS_VALID_DINODE(di)) {
		OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di);
		status = -EIO;
		goto bail;
	}

	if (le16_to_cpu(tl->tl_used)) {
		mlog(0, "We'll have %u logs to recover\n",
		     le16_to_cpu(tl->tl_used));

		/* Ownership of this allocation passes to the caller on
		 * success; the caller kfree()s it when done. */
		*tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
		if (!(*tl_copy)) {
			status = -ENOMEM;
			mlog_errno(status);
			goto bail;
		}

		/* Assuming the write-out below goes well, this copy
		 * will be passed back to recovery for processing. */
		memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size);

		/* All we need to do to clear the truncate log is set
		 * tl_used. */
		tl->tl_used = 0;

		status = ocfs2_write_block(osb, tl_bh, tl_inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (tl_inode)
		iput(tl_inode);
	if (tl_bh)
		brelse(tl_bh);

	/* On failure, free any partially-made copy so the caller
	 * always sees NULL with a negative return. */
	if (status < 0 && (*tl_copy)) {
		kfree(*tl_copy);
		*tl_copy = NULL;
	}

	mlog_exit(status);
	return status;
}
2852
/*
 * Second stage of truncate log recovery: replay every record from a
 * dead node's log copy (made by ocfs2_begin_truncate_log_recovery())
 * into this node's own truncate log, flushing ours whenever it fills.
 */
int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
					 struct ocfs2_dinode *tl_copy)
{
	int status = 0;
	int i;
	unsigned int clusters, num_recs, start_cluster;
	u64 start_blk;
	handle_t *handle;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_truncate_log *tl;

	mlog_entry_void();

	/* Recovering our own log through this path would be a bug in
	 * the caller. */
	if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
		mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
		return -EINVAL;
	}

	tl = &tl_copy->id2.i_dealloc;
	num_recs = le16_to_cpu(tl->tl_used);
	mlog(0, "cleanup %u records from %llu\n", num_recs,
	     (unsigned long long)le64_to_cpu(tl_copy->i_blkno));

	mutex_lock(&tl_inode->i_mutex);
	for(i = 0; i < num_recs; i++) {
		/* Make room in our own log before appending. */
		if (ocfs2_truncate_log_needs_flush(osb)) {
			status = __ocfs2_flush_truncate_log(osb);
			if (status < 0) {
				mlog_errno(status);
				goto bail_up;
			}
		}

		/* One short transaction per replayed record. */
		handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_up;
		}

		clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
		start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
		start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);

		status = ocfs2_truncate_log_append(osb, handle,
						   start_blk, clusters);
		ocfs2_commit_trans(osb, handle);
		if (status < 0) {
			mlog_errno(status);
			goto bail_up;
		}
	}

bail_up:
	mutex_unlock(&tl_inode->i_mutex);

	mlog_exit(status);
	return status;
}
2912
2913void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
2914{
2915 int status;
2916 struct inode *tl_inode = osb->osb_tl_inode;
2917
2918 mlog_entry_void();
2919
2920 if (tl_inode) {
2921 cancel_delayed_work(&osb->osb_truncate_log_wq);
2922 flush_workqueue(ocfs2_wq);
2923
2924 status = ocfs2_flush_truncate_log(osb);
2925 if (status < 0)
2926 mlog_errno(status);
2927
2928 brelse(osb->osb_tl_bh);
2929 iput(osb->osb_tl_inode);
2930 }
2931
2932 mlog_exit_void();
2933}
2934
2935int ocfs2_truncate_log_init(struct ocfs2_super *osb)
2936{
2937 int status;
2938 struct inode *tl_inode = NULL;
2939 struct buffer_head *tl_bh = NULL;
2940
2941 mlog_entry_void();
2942
2943 status = ocfs2_get_truncate_log_info(osb,
2944 osb->slot_num,
2945 &tl_inode,
2946 &tl_bh);
2947 if (status < 0)
2948 mlog_errno(status);
2949
2950 /* ocfs2_truncate_log_shutdown keys on the existence of
2951 * osb->osb_tl_inode so we don't set any of the osb variables
2952 * until we're sure all is well. */
David Howellsc4028952006-11-22 14:57:56 +00002953 INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
2954 ocfs2_truncate_log_worker);
Mark Fashehccd979b2005-12-15 14:31:24 -08002955 osb->osb_tl_bh = tl_bh;
2956 osb->osb_tl_inode = tl_inode;
2957
2958 mlog_exit(status);
2959 return status;
2960}
2961
Mark Fasheh2b604352007-06-22 15:45:27 -07002962/*
2963 * Delayed de-allocation of suballocator blocks.
2964 *
2965 * Some sets of block de-allocations might involve multiple suballocator inodes.
2966 *
2967 * The locking for this can get extremely complicated, especially when
2968 * the suballocator inodes to delete from aren't known until deep
2969 * within an unrelated codepath.
2970 *
2971 * ocfs2_extent_block structures are a good example of this - an inode
2972 * btree could have been grown by any number of nodes each allocating
2973 * out of their own suballoc inode.
2974 *
2975 * These structures allow the delay of block de-allocation until a
2976 * later time, when locking of multiple cluster inodes won't cause
2977 * deadlock.
2978 */
2979
2980/*
2981 * Describes a single block free from a suballocator
2982 */
struct ocfs2_cached_block_free {
	struct ocfs2_cached_block_free *free_next;	/* singly-linked list */
	u64 free_blk;					/* block to free */
	unsigned int free_bit;		/* bit within its suballoc group */
};
2988
/* One pending free list per (system file type, slot) suballocator. */
struct ocfs2_per_slot_free_list {
	struct ocfs2_per_slot_free_list *f_next_suballocator;	/* next list */
	int f_inode_type;		/* suballocator system file type */
	int f_slot;			/* slot owning that suballocator */
	struct ocfs2_cached_block_free *f_first;	/* items to free */
};
2995
/*
 * Free every cached item on @head against a single suballocator
 * inode, consuming (kfree'ing) the list as we go. The list is always
 * fully freed, even on error.
 */
static int ocfs2_free_cached_items(struct ocfs2_super *osb,
				   int sysfile_type,
				   int slot,
				   struct ocfs2_cached_block_free *head)
{
	int ret;
	u64 bg_blkno;
	handle_t *handle;
	struct inode *inode;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_cached_block_free *tmp;

	inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot);
	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	/* Lock order: i_mutex, cluster meta lock, transaction. */
	mutex_lock(&inode->i_mutex);

	ret = ocfs2_meta_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	while (head) {
		bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
						      head->free_bit);
		mlog(0, "Free bit: (bit %u, blkno %llu)\n",
		     head->free_bit, (unsigned long long)head->free_blk);

		ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
					       head->free_bit, bg_blkno, 1);
		if (ret) {
			mlog_errno(ret);
			goto out_journal;
		}

		/* Top up credits for the next iteration. */
		ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
		if (ret) {
			mlog_errno(ret);
			goto out_journal;
		}

		/* Item fully processed - consume it. */
		tmp = head;
		head = head->free_next;
		kfree(tmp);
	}

out_journal:
	ocfs2_commit_trans(osb, handle);

out_unlock:
	ocfs2_meta_unlock(inode, 1);
	brelse(di_bh);
out_mutex:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
out:
	while(head) {
		/* Premature exit may have left some dangling items. */
		tmp = head;
		head = head->free_next;
		kfree(tmp);
	}

	return ret;
}
3073
3074int ocfs2_run_deallocs(struct ocfs2_super *osb,
3075 struct ocfs2_cached_dealloc_ctxt *ctxt)
3076{
3077 int ret = 0, ret2;
3078 struct ocfs2_per_slot_free_list *fl;
3079
3080 if (!ctxt)
3081 return 0;
3082
3083 while (ctxt->c_first_suballocator) {
3084 fl = ctxt->c_first_suballocator;
3085
3086 if (fl->f_first) {
3087 mlog(0, "Free items: (type %u, slot %d)\n",
3088 fl->f_inode_type, fl->f_slot);
3089 ret2 = ocfs2_free_cached_items(osb, fl->f_inode_type,
3090 fl->f_slot, fl->f_first);
3091 if (ret2)
3092 mlog_errno(ret2);
3093 if (!ret)
3094 ret = ret2;
3095 }
3096
3097 ctxt->c_first_suballocator = fl->f_next_suballocator;
3098 kfree(fl);
3099 }
3100
3101 return ret;
3102}
3103
3104static struct ocfs2_per_slot_free_list *
3105ocfs2_find_per_slot_free_list(int type,
3106 int slot,
3107 struct ocfs2_cached_dealloc_ctxt *ctxt)
3108{
3109 struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
3110
3111 while (fl) {
3112 if (fl->f_inode_type == type && fl->f_slot == slot)
3113 return fl;
3114
3115 fl = fl->f_next_suballocator;
3116 }
3117
3118 fl = kmalloc(sizeof(*fl), GFP_NOFS);
3119 if (fl) {
3120 fl->f_inode_type = type;
3121 fl->f_slot = slot;
3122 fl->f_first = NULL;
3123 fl->f_next_suballocator = ctxt->c_first_suballocator;
3124
3125 ctxt->c_first_suballocator = fl;
3126 }
3127 return fl;
3128}
3129
3130static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
3131 int type, int slot, u64 blkno,
3132 unsigned int bit)
3133{
3134 int ret;
3135 struct ocfs2_per_slot_free_list *fl;
3136 struct ocfs2_cached_block_free *item;
3137
3138 fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
3139 if (fl == NULL) {
3140 ret = -ENOMEM;
3141 mlog_errno(ret);
3142 goto out;
3143 }
3144
3145 item = kmalloc(sizeof(*item), GFP_NOFS);
3146 if (item == NULL) {
3147 ret = -ENOMEM;
3148 mlog_errno(ret);
3149 goto out;
3150 }
3151
3152 mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
3153 type, slot, bit, (unsigned long long)blkno);
3154
3155 item->free_blk = blkno;
3156 item->free_bit = bit;
3157 item->free_next = fl->f_first;
3158
3159 fl->f_first = item;
3160
3161 ret = 0;
3162out:
3163 return ret;
3164}
3165
Mark Fasheh59a5e412007-06-22 15:52:36 -07003166static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
3167 struct ocfs2_extent_block *eb)
3168{
3169 return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
3170 le16_to_cpu(eb->h_suballoc_slot),
3171 le64_to_cpu(eb->h_blkno),
3172 le16_to_cpu(eb->h_suballoc_bit));
3173}
3174
/* This function will figure out whether the currently last extent
 * block will be deleted, and if it will, what the new last extent
 * block will be so we can update his h_next_leaf_blk field, as well
 * as the dinodes i_last_eb_blk */
static int ocfs2_find_new_last_ext_blk(struct inode *inode,
				       unsigned int clusters_to_del,
				       struct ocfs2_path *path,
				       struct buffer_head **new_last_eb)
{
	int next_free, ret = 0;
	u32 cpos;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct buffer_head *bh = NULL;

	/* NULL means "the current last extent block survives". */
	*new_last_eb = NULL;

	/* we have no tree, so of course, no last_eb. */
	if (!path->p_tree_depth)
		goto out;

	/* trunc to zero special case - this makes tree_depth = 0
	 * regardless of what it is. */
	if (OCFS2_I(inode)->ip_clusters == clusters_to_del)
		goto out;

	el = path_leaf_el(path);
	BUG_ON(!el->l_next_free_rec);

	/*
	 * Make sure that this extent list will actually be empty
	 * after we clear away the data. We can shortcut out if
	 * there's more than one non-empty extent in the
	 * list. Otherwise, a check of the remaining extent is
	 * necessary.
	 */
	next_free = le16_to_cpu(el->l_next_free_rec);
	rec = NULL;
	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		if (next_free > 2)
			goto out;

		/* We may have a valid extent in index 1, check it. */
		if (next_free == 2)
			rec = &el->l_recs[1];

		/*
		 * Fall through - no more nonempty extents, so we want
		 * to delete this leaf.
		 */
	} else {
		if (next_free > 1)
			goto out;

		rec = &el->l_recs[0];
	}

	if (rec) {
		/*
		 * Check it we'll only be trimming off the end of this
		 * cluster.
		 */
		if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
			goto out;
	}

	/*
	 * The current rightmost leaf is going away - the leaf to its
	 * left becomes the new last extent block.
	 */
	ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	eb = (struct ocfs2_extent_block *) bh->b_data;
	el = &eb->h_list;
	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
		ret = -EROFS;
		goto out;
	}

	/*
	 * Take our own reference for the caller; the find_leaf
	 * reference is dropped by the brelse() below.
	 */
	*new_last_eb = bh;
	get_bh(*new_last_eb);
	mlog(0, "returning block %llu, (cpos: %u)\n",
	     (unsigned long long)le64_to_cpu(eb->h_blkno), cpos);
out:
	brelse(bh);

	return ret;
}
3271
/*
 * Trim some clusters off the rightmost edge of a tree. Only called
 * during truncate.
 *
 * The caller needs to:
 *   - start journaling of each path component.
 *   - compute and fully set up any new last ext block
 */
static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
			   handle_t *handle, struct ocfs2_truncate_context *tc,
			   u32 clusters_to_del, u64 *delete_start)
{
	int ret, i, index = path->p_tree_depth;
	u32 new_edge = 0;
	u64 deleted_eb = 0;
	struct buffer_head *bh;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	/* First block of the clusters removed; 0 if nothing to delete. */
	*delete_start = 0;

	/*
	 * Walk the rightmost path from the leaf (index ==
	 * p_tree_depth) back up to the root (index == 0), adjusting
	 * the tail record at each level.
	 */
	while (index >= 0) {
		bh = path->p_node[index].bh;
		el = path->p_node[index].el;

		mlog(0, "traveling tree (index = %d, block = %llu)\n",
		     index, (unsigned long long)bh->b_blocknr);

		BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);

		/* Depth recorded on the block must match its position. */
		if (index !=
		    (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has invalid ext. block %llu",
				    inode->i_ino,
				    (unsigned long long)bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}

find_tail_record:
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		rec = &el->l_recs[i];

		mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
		     "next = %u\n", i, le32_to_cpu(rec->e_cpos),
		     ocfs2_rec_clusters(el, rec),
		     (unsigned long long)le64_to_cpu(rec->e_blkno),
		     le16_to_cpu(el->l_next_free_rec));

		BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);

		if (le16_to_cpu(el->l_tree_depth) == 0) {
			/*
			 * If the leaf block contains a single empty
			 * extent and no records, we can just remove
			 * the block.
			 */
			if (i == 0 && ocfs2_is_empty_extent(rec)) {
				memset(rec, 0,
				       sizeof(struct ocfs2_extent_rec));
				el->l_next_free_rec = cpu_to_le16(0);

				goto delete;
			}

			/*
			 * Remove any empty extents by shifting things
			 * left. That should make life much easier on
			 * the code below. This condition is rare
			 * enough that we shouldn't see a performance
			 * hit.
			 */
			if (ocfs2_is_empty_extent(&el->l_recs[0])) {
				le16_add_cpu(&el->l_next_free_rec, -1);

				for(i = 0;
				    i < le16_to_cpu(el->l_next_free_rec); i++)
					el->l_recs[i] = el->l_recs[i + 1];

				memset(&el->l_recs[i], 0,
				       sizeof(struct ocfs2_extent_rec));

				/*
				 * We've modified our extent list. The
				 * simplest way to handle this change
				 * is to being the search from the
				 * start again.
				 */
				goto find_tail_record;
			}

			le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);

			/*
			 * We'll use "new_edge" on our way back up the
			 * tree to know what our rightmost cpos is.
			 */
			new_edge = le16_to_cpu(rec->e_leaf_clusters);
			new_edge += le32_to_cpu(rec->e_cpos);

			/*
			 * The caller will use this to delete data blocks.
			 */
			*delete_start = le64_to_cpu(rec->e_blkno)
				+ ocfs2_clusters_to_blocks(inode->i_sb,
					le16_to_cpu(rec->e_leaf_clusters));

			/*
			 * If it's now empty, remove this record.
			 */
			if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
				memset(rec, 0,
				       sizeof(struct ocfs2_extent_rec));
				le16_add_cpu(&el->l_next_free_rec, -1);
			}
		} else {
			/* Interior node: drop the record pointing at a
			 * child block we deleted on the level below. */
			if (le64_to_cpu(rec->e_blkno) == deleted_eb) {
				memset(rec, 0,
				       sizeof(struct ocfs2_extent_rec));
				le16_add_cpu(&el->l_next_free_rec, -1);

				goto delete;
			}

			/* Can this actually happen? */
			if (le16_to_cpu(el->l_next_free_rec) == 0)
				goto delete;

			/*
			 * We never actually deleted any clusters
			 * because our leaf was empty. There's no
			 * reason to adjust the rightmost edge then.
			 */
			if (new_edge == 0)
				goto delete;

			rec->e_int_clusters = cpu_to_le32(new_edge);
			le32_add_cpu(&rec->e_int_clusters,
				     -le32_to_cpu(rec->e_cpos));

			/*
			 * A deleted child record should have been
			 * caught above.
			 */
			BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
		}

delete:
		ret = ocfs2_journal_dirty(handle, bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		mlog(0, "extent list container %llu, after: record %d: "
		     "(%u, %u, %llu), next = %u.\n",
		     (unsigned long long)bh->b_blocknr, i,
		     le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
		     (unsigned long long)le64_to_cpu(rec->e_blkno),
		     le16_to_cpu(el->l_next_free_rec));

		/*
		 * We must be careful to only attempt delete of an
		 * extent block (and not the root inode block).
		 */
		if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) {
			struct ocfs2_extent_block *eb =
				(struct ocfs2_extent_block *)bh->b_data;

			/*
			 * Save this for use when processing the
			 * parent block.
			 */
			deleted_eb = le64_to_cpu(eb->h_blkno);

			mlog(0, "deleting this extent block.\n");

			ocfs2_remove_from_cache(inode, bh);

			BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
			BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
			BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));

			/* Queue the block for freeing after locks drop. */
			ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb);
			/* An error here is not fatal. */
			if (ret < 0)
				mlog_errno(ret);
		} else {
			deleted_eb = 0;
		}

		index--;
	}

	ret = 0;
out:
	return ret;
}
3471
Mark Fashehccd979b2005-12-15 14:31:24 -08003472static int ocfs2_do_truncate(struct ocfs2_super *osb,
3473 unsigned int clusters_to_del,
3474 struct inode *inode,
3475 struct buffer_head *fe_bh,
Mark Fasheh1fabe142006-10-09 18:11:45 -07003476 handle_t *handle,
Mark Fashehdcd05382007-01-16 11:32:23 -08003477 struct ocfs2_truncate_context *tc,
3478 struct ocfs2_path *path)
Mark Fashehccd979b2005-12-15 14:31:24 -08003479{
Mark Fasheh3a0782d2007-01-17 12:53:31 -08003480 int status;
Mark Fashehccd979b2005-12-15 14:31:24 -08003481 struct ocfs2_dinode *fe;
Mark Fashehccd979b2005-12-15 14:31:24 -08003482 struct ocfs2_extent_block *last_eb = NULL;
3483 struct ocfs2_extent_list *el;
Mark Fashehccd979b2005-12-15 14:31:24 -08003484 struct buffer_head *last_eb_bh = NULL;
Mark Fashehccd979b2005-12-15 14:31:24 -08003485 u64 delete_blk = 0;
3486
3487 fe = (struct ocfs2_dinode *) fe_bh->b_data;
3488
Mark Fasheh3a0782d2007-01-17 12:53:31 -08003489 status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del,
Mark Fashehdcd05382007-01-16 11:32:23 -08003490 path, &last_eb_bh);
Mark Fashehccd979b2005-12-15 14:31:24 -08003491 if (status < 0) {
3492 mlog_errno(status);
3493 goto bail;
3494 }
Mark Fashehccd979b2005-12-15 14:31:24 -08003495
Mark Fashehdcd05382007-01-16 11:32:23 -08003496 /*
3497 * Each component will be touched, so we might as well journal
3498 * here to avoid having to handle errors later.
3499 */
Mark Fasheh3a0782d2007-01-17 12:53:31 -08003500 status = ocfs2_journal_access_path(inode, handle, path);
3501 if (status < 0) {
3502 mlog_errno(status);
3503 goto bail;
Mark Fashehdcd05382007-01-16 11:32:23 -08003504 }
3505
3506 if (last_eb_bh) {
3507 status = ocfs2_journal_access(handle, inode, last_eb_bh,
3508 OCFS2_JOURNAL_ACCESS_WRITE);
3509 if (status < 0) {
3510 mlog_errno(status);
3511 goto bail;
3512 }
3513
3514 last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
3515 }
3516
3517 el = &(fe->id2.i_list);
3518
3519 /*
3520 * Lower levels depend on this never happening, but it's best
3521 * to check it up here before changing the tree.
3522 */
Mark Fashehe48edee2007-03-07 16:46:57 -08003523 if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) {
Mark Fashehdcd05382007-01-16 11:32:23 -08003524 ocfs2_error(inode->i_sb,
3525 "Inode %lu has an empty extent record, depth %u\n",
3526 inode->i_ino, le16_to_cpu(el->l_tree_depth));
Mark Fasheh3a0782d2007-01-17 12:53:31 -08003527 status = -EROFS;
Mark Fashehccd979b2005-12-15 14:31:24 -08003528 goto bail;
3529 }
Mark Fashehccd979b2005-12-15 14:31:24 -08003530
3531 spin_lock(&OCFS2_I(inode)->ip_lock);
3532 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
3533 clusters_to_del;
3534 spin_unlock(&OCFS2_I(inode)->ip_lock);
3535 le32_add_cpu(&fe->i_clusters, -clusters_to_del);
Mark Fashehccd979b2005-12-15 14:31:24 -08003536
Mark Fasheh3a0782d2007-01-17 12:53:31 -08003537 status = ocfs2_trim_tree(inode, path, handle, tc,
3538 clusters_to_del, &delete_blk);
3539 if (status) {
3540 mlog_errno(status);
3541 goto bail;
Mark Fashehccd979b2005-12-15 14:31:24 -08003542 }
3543
Mark Fashehdcd05382007-01-16 11:32:23 -08003544 if (le32_to_cpu(fe->i_clusters) == 0) {
Mark Fashehccd979b2005-12-15 14:31:24 -08003545 /* trunc to zero is a special case. */
3546 el->l_tree_depth = 0;
3547 fe->i_last_eb_blk = 0;
3548 } else if (last_eb)
3549 fe->i_last_eb_blk = last_eb->h_blkno;
3550
3551 status = ocfs2_journal_dirty(handle, fe_bh);
3552 if (status < 0) {
3553 mlog_errno(status);
3554 goto bail;
3555 }
3556
3557 if (last_eb) {
3558 /* If there will be a new last extent block, then by
3559 * definition, there cannot be any leaves to the right of
3560 * him. */
Mark Fashehccd979b2005-12-15 14:31:24 -08003561 last_eb->h_next_leaf_blk = 0;
3562 status = ocfs2_journal_dirty(handle, last_eb_bh);
3563 if (status < 0) {
3564 mlog_errno(status);
3565 goto bail;
3566 }
3567 }
3568
Mark Fasheh3a0782d2007-01-17 12:53:31 -08003569 if (delete_blk) {
3570 status = ocfs2_truncate_log_append(osb, handle, delete_blk,
3571 clusters_to_del);
Mark Fashehccd979b2005-12-15 14:31:24 -08003572 if (status < 0) {
3573 mlog_errno(status);
3574 goto bail;
3575 }
Mark Fashehccd979b2005-12-15 14:31:24 -08003576 }
3577 status = 0;
3578bail:
Mark Fashehdcd05382007-01-16 11:32:23 -08003579
Mark Fashehccd979b2005-12-15 14:31:24 -08003580 mlog_exit(status);
3581 return status;
3582}
3583
Mark Fasheh60b11392007-02-16 11:46:50 -08003584static int ocfs2_writeback_zero_func(handle_t *handle, struct buffer_head *bh)
3585{
3586 set_buffer_uptodate(bh);
3587 mark_buffer_dirty(bh);
3588 return 0;
3589}
3590
3591static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh)
3592{
3593 set_buffer_uptodate(bh);
3594 mark_buffer_dirty(bh);
3595 return ocfs2_journal_dirty_data(handle, bh);
3596}
3597
/*
 * Zero the bytes of @pages (the tail of an allocated cluster, already
 * locked and @numpages long) that lie past @isize, mapping buffers to
 * the physical blocks starting at @phys and dirtying them through the
 * appropriate journal mode callback.  Pages are unlocked and released
 * on the way out.  Errors from mapping/walking are logged but not
 * propagated - this is best-effort zeroing.
 */
static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t isize,
				     struct page **pages, int numpages,
				     u64 phys, handle_t *handle)
{
	int i, ret, partial = 0;
	void *kaddr;
	struct page *page;
	unsigned int from, to = PAGE_CACHE_SIZE;
	struct super_block *sb = inode->i_sb;

	/* Only sparse file systems zero the tail this way. */
	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));

	if (numpages == 0)
		goto out;

	from = isize & (PAGE_CACHE_SIZE - 1); /* 1st page offset */
	if (PAGE_CACHE_SHIFT > OCFS2_SB(sb)->s_clustersize_bits) {
		/*
		 * Since 'from' has been capped to a value below page
		 * size, this calculation won't be able to overflow
		 * 'to'
		 */
		to = ocfs2_align_bytes_to_clusters(sb, from);

		/*
		 * The truncate tail in this case should never contain
		 * more than one page at maximum. The loop below also
		 * assumes this.
		 */
		BUG_ON(numpages != 1);
	}

	for(i = 0; i < numpages; i++) {
		page = pages[i];

		BUG_ON(from > PAGE_CACHE_SIZE);
		BUG_ON(to > PAGE_CACHE_SIZE);

		ret = ocfs2_map_page_blocks(page, &phys, inode, from, to, 0);
		if (ret)
			mlog_errno(ret);

		/* Zero the [from, to) range of this page. */
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + from, 0, to - from);
		kunmap_atomic(kaddr, KM_USER0);

		/*
		 * Need to set the buffers we zero'd into uptodate
		 * here if they aren't - ocfs2_map_page_blocks()
		 * might've skipped some
		 */
		if (ocfs2_should_order_data(inode)) {
			ret = walk_page_buffers(handle,
						page_buffers(page),
						from, to, &partial,
						ocfs2_ordered_zero_func);
			if (ret < 0)
				mlog_errno(ret);
		} else {
			ret = walk_page_buffers(handle, page_buffers(page),
						from, to, &partial,
						ocfs2_writeback_zero_func);
			if (ret < 0)
				mlog_errno(ret);
		}

		if (!partial)
			SetPageUptodate(page);

		flush_dcache_page(page);

		/*
		 * Every page after the 1st one should be completely zero'd.
		 */
		from = 0;
	}
out:
	if (pages) {
		for (i = 0; i < numpages; i++) {
			page = pages[i];
			unlock_page(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
}
3684
/*
 * Grab and lock the page cache pages covering the bytes from @isize
 * to the end of its cluster, for later zeroing.  On success the
 * locked pages are in @pages, their count in @num and the physical
 * block of the tail in @phys.
 *
 * *num is 0 (and no pages are held) when nothing needs zeroing: the
 * size is cluster aligned, the tail is a hole, or the tail extent is
 * marked unwritten.  On error all grabbed pages are unlocked and
 * released before returning.
 */
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t isize, struct page **pages,
				int *num, u64 *phys)
{
	int i, numpages = 0, ret = 0;
	unsigned int csize = OCFS2_SB(inode->i_sb)->s_clustersize;
	unsigned int ext_flags;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index;
	u64 next_cluster_bytes;

	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));

	/* Cluster boundary, so we don't need to grab any pages. */
	if ((isize & (csize - 1)) == 0)
		goto out;

	/* Look up the physical block holding the byte at isize. */
	ret = ocfs2_extent_map_get_blocks(inode, isize >> sb->s_blocksize_bits,
					  phys, NULL, &ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Tail is a hole. */
	if (*phys == 0)
		goto out;

	/* Tail is marked as unwritten, we can count on write to zero
	 * in that case. */
	if (ext_flags & OCFS2_EXT_UNWRITTEN)
		goto out;

	/* Grab every page from isize up to the next cluster boundary. */
	next_cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, isize);
	index = isize >> PAGE_CACHE_SHIFT;
	do {
		pages[numpages] = grab_cache_page(mapping, index);
		if (!pages[numpages]) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}

		numpages++;
		index++;
	} while (index < (next_cluster_bytes >> PAGE_CACHE_SHIFT));

out:
	if (ret != 0) {
		/* Error: release anything we managed to grab. */
		if (pages) {
			for (i = 0; i < numpages; i++) {
				if (pages[i]) {
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
				}
			}
		}
		numpages = 0;
	}

	*num = numpages;

	return ret;
}
3749
3750/*
3751 * Zero the area past i_size but still within an allocated
3752 * cluster. This avoids exposing nonzero data on subsequent file
3753 * extends.
3754 *
3755 * We need to call this before i_size is updated on the inode because
3756 * otherwise block_write_full_page() will skip writeout of pages past
3757 * i_size. The new_i_size parameter is passed for this reason.
3758 */
3759int ocfs2_zero_tail_for_truncate(struct inode *inode, handle_t *handle,
3760 u64 new_i_size)
3761{
3762 int ret, numpages;
Mark Fashehfa410452007-03-01 11:22:19 -08003763 loff_t endbyte;
Mark Fasheh60b11392007-02-16 11:46:50 -08003764 struct page **pages = NULL;
3765 u64 phys;
3766
3767 /*
3768 * File systems which don't support sparse files zero on every
3769 * extend.
3770 */
3771 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
3772 return 0;
3773
3774 pages = kcalloc(ocfs2_pages_per_cluster(inode->i_sb),
3775 sizeof(struct page *), GFP_NOFS);
3776 if (pages == NULL) {
3777 ret = -ENOMEM;
3778 mlog_errno(ret);
3779 goto out;
3780 }
3781
3782 ret = ocfs2_grab_eof_pages(inode, new_i_size, pages, &numpages, &phys);
3783 if (ret) {
3784 mlog_errno(ret);
3785 goto out;
3786 }
3787
Mark Fasheh60b11392007-02-16 11:46:50 -08003788 if (numpages == 0)
3789 goto out;
3790
3791 ocfs2_zero_cluster_pages(inode, new_i_size, pages, numpages, phys,
3792 handle);
3793
3794 /*
3795 * Initiate writeout of the pages we zero'd here. We don't
3796 * wait on them - the truncate_inode_pages() call later will
3797 * do that for us.
3798 */
Mark Fashehfa410452007-03-01 11:22:19 -08003799 endbyte = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
3800 ret = do_sync_mapping_range(inode->i_mapping, new_i_size,
3801 endbyte - 1, SYNC_FILE_RANGE_WRITE);
Mark Fasheh60b11392007-02-16 11:46:50 -08003802 if (ret)
3803 mlog_errno(ret);
3804
3805out:
3806 if (pages)
3807 kfree(pages);
3808
3809 return ret;
3810}
3811
/*
 * It is expected, that by the time you call this function,
 * inode->i_size and fe->i_size have been adjusted.
 *
 * WARNING: This will kfree the truncate context
 *
 * Loops removing allocation from the rightmost branch of the tree,
 * one transaction per pass, until the cluster count matches the new
 * i_size.  Freed clusters go through the truncate log; freed
 * metadata blocks go through tc->tc_dealloc and are released after
 * all locks are dropped.
 */
int ocfs2_commit_truncate(struct ocfs2_super *osb,
			  struct inode *inode,
			  struct buffer_head *fe_bh,
			  struct ocfs2_truncate_context *tc)
{
	int status, i, credits, tl_sem = 0;
	u32 clusters_to_del, new_highest_cpos, range;
	struct ocfs2_extent_list *el;
	handle_t *handle = NULL;
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_path *path = NULL;

	mlog_entry_void();

	/* First cluster offset that must NOT survive the truncate. */
	new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
						    i_size_read(inode));

	path = ocfs2_new_inode_path(fe_bh);
	if (!path) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	/* Invalidate cached extent map entries past the new size. */
	ocfs2_extent_map_trunc(inode, new_highest_cpos);

start:
	/*
	 * Check that we still have allocation to delete.
	 */
	if (OCFS2_I(inode)->ip_clusters == 0) {
		status = 0;
		goto bail;
	}

	/*
	 * Truncate always works against the rightmost tree branch.
	 */
	status = ocfs2_find_path(inode, path, UINT_MAX);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n",
	     OCFS2_I(inode)->ip_clusters, path->p_tree_depth);

	/*
	 * By now, el will point to the extent list on the bottom most
	 * portion of this tree. Only the tail record is considered in
	 * each pass.
	 *
	 * We handle the following cases, in order:
	 * - empty extent: delete the remaining branch
	 * - remove the entire record
	 * - remove a partial record
	 * - no record needs to be removed (truncate has completed)
	 */
	el = path_leaf_el(path);
	if (le16_to_cpu(el->l_next_free_rec) == 0) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has empty extent block at %llu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)path_leaf_bh(path)->b_blocknr);
		status = -EROFS;
		goto bail;
	}

	i = le16_to_cpu(el->l_next_free_rec) - 1;
	range = le32_to_cpu(el->l_recs[i].e_cpos) +
		ocfs2_rec_clusters(el, &el->l_recs[i]);
	if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) {
		clusters_to_del = 0;
	} else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
		clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
	} else if (range > new_highest_cpos) {
		clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
				   le32_to_cpu(el->l_recs[i].e_cpos)) -
				  new_highest_cpos;
	} else {
		status = 0;
		goto bail;
	}

	mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
	     clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);

	BUG_ON(clusters_to_del == 0);

	mutex_lock(&tl_inode->i_mutex);
	tl_sem = 1;
	/* ocfs2_truncate_log_needs_flush guarantees us at least one
	 * record is free for use. If there isn't any, we flush to get
	 * an empty truncate log. */
	if (ocfs2_truncate_log_needs_flush(osb)) {
		status = __ocfs2_flush_truncate_log(osb);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
						(struct ocfs2_dinode *)fe_bh->b_data,
						el);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
				   tc, path);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mutex_unlock(&tl_inode->i_mutex);
	tl_sem = 0;

	ocfs2_commit_trans(osb, handle);
	handle = NULL;

	/* Drop cached path buffers before walking the tree again. */
	ocfs2_reinit_path(path, 1);

	/*
	 * The check above will catch the case where we've truncated
	 * away all allocation.
	 */
	goto start;

bail:

	ocfs2_schedule_truncate_log_flush(osb, 1);

	if (tl_sem)
		mutex_unlock(&tl_inode->i_mutex);

	if (handle)
		ocfs2_commit_trans(osb, handle);

	/* Free the metadata blocks queued by ocfs2_trim_tree(). */
	ocfs2_run_deallocs(osb, &tc->tc_dealloc);

	ocfs2_free_path(path);

	/* This will drop the ext_alloc cluster lock for us */
	ocfs2_free_truncate_context(tc);

	mlog_exit(status);
	return status;
}
3972
/*
 * Expects the inode to already be locked.
 *
 * Allocates and initializes the truncate context for
 * ocfs2_commit_truncate(): sets up the dealloc context and, when the
 * inode has a tree, reads and validates the current last extent
 * block (stored in tc_last_eb_bh, released when the context is
 * freed).  On failure *tc is freed and set back to NULL.
 */
int ocfs2_prepare_truncate(struct ocfs2_super *osb,
			   struct inode *inode,
			   struct buffer_head *fe_bh,
			   struct ocfs2_truncate_context **tc)
{
	int status;
	unsigned int new_i_clusters;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct buffer_head *last_eb_bh = NULL;

	mlog_entry_void();

	*tc = NULL;

	new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
						  i_size_read(inode));
	fe = (struct ocfs2_dinode *) fe_bh->b_data;

	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
	     "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
	     (unsigned long long)le64_to_cpu(fe->i_size));

	/* kzalloc keeps tc_last_eb_bh NULL until we assign it below. */
	*tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
	if (!(*tc)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);

	if (fe->id2.i_list.l_tree_depth) {
		status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
					  &last_eb_bh, OCFS2_BH_CACHED, inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);

			/* Drop our ref here; tc_last_eb_bh is still NULL
			 * so the context free below won't touch it. */
			brelse(last_eb_bh);
			status = -EIO;
			goto bail;
		}
	}

	(*tc)->tc_last_eb_bh = last_eb_bh;

	status = 0;
bail:
	if (status < 0) {
		if (*tc)
			ocfs2_free_truncate_context(*tc);
		*tc = NULL;
	}
	mlog_exit_void();
	return status;
}
4036
4037static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
4038{
Mark Fasheh59a5e412007-06-22 15:52:36 -07004039 /*
4040 * The caller is responsible for completing deallocation
4041 * before freeing the context.
4042 */
4043 if (tc->tc_dealloc.c_first_suballocator != NULL)
4044 mlog(ML_NOTICE,
4045 "Truncate completion has non-empty dealloc context\n");
Mark Fashehccd979b2005-12-15 14:31:24 -08004046
4047 if (tc->tc_last_eb_bh)
4048 brelse(tc->tc_last_eb_bh);
4049
4050 kfree(tc);
4051}