blob: 02429d81ca3374fef11d9646391d3ee7f1878139 [file] [log] [blame]
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */
22
/*
 * This file implements functions that manage the running of the commit process.
 * Each affected module has its own functions to accomplish their part in the
 * commit and those functions are called here.
 *
 * The commit is the process whereby all updates to the index and LEB properties
 * are written out together and the journal becomes empty. This keeps the
 * file system consistent - at all times the state can be recreated by reading
 * the index and LEB properties and then replaying the journal.
 *
 * The commit is split into two parts named "commit start" and "commit end".
 * During commit start, the commit process has exclusive access to the journal
 * by holding the commit semaphore down for writing. As few I/O operations as
 * possible are performed during commit start; instead, the nodes that are to be
 * written are merely identified. During commit end, the commit semaphore is no
 * longer held and the journal is again in operation, allowing users to continue
 * to use the file system while the bulk of the commit I/O is performed. The
 * purpose of this two-step approach is to prevent the commit from causing any
 * latency blips. Note that in any case, the commit does not prevent lookups
 * (as permitted by the TNC mutex), or access to VFS data structures, e.g. the
 * page cache.
 */
45
46#include <linux/freezer.h>
47#include <linux/kthread.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090048#include <linux/slab.h>
Artem Bityutskiy1e517642008-07-14 19:08:37 +030049#include "ubifs.h"
50
/**
 * do_commit - commit the journal.
 * @c: UBIFS file-system description object
 *
 * This function implements UBIFS commit. It has to be called with commit lock
 * locked. Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note: the caller holds @c->commit_sem for writing; this function releases it
 * on all paths - after "commit start" finishes on success, or via 'out_up' on
 * early failure.
 */
static int do_commit(struct ubifs_info *c)
{
	int err, new_ltail_lnum, old_ltail_lnum, i;
	struct ubifs_zbranch zroot;
	struct ubifs_lp_stats lst;

	dbg_cmt("start");
	ubifs_assert(!c->ro_media && !c->ro_mount);

	/* Refuse to commit once an error has switched the FS read-only */
	if (c->ro_error) {
		err = -EROFS;
		goto out_up;
	}

	/* Sync all write buffers (necessary for recovery) */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			goto out_up;
	}

	/*
	 * "Commit start" phase: with the commit semaphore held, each module
	 * (GC, log, TNC, LPT, orphans) identifies what it must write.
	 */
	c->cmt_no += 1;
	err = ubifs_gc_start_commit(c);
	if (err)
		goto out_up;
	err = dbg_check_lprops(c);
	if (err)
		goto out_up;
	err = ubifs_log_start_commit(c, &new_ltail_lnum);
	if (err)
		goto out_up;
	err = ubifs_tnc_start_commit(c, &zroot);
	if (err)
		goto out_up;
	err = ubifs_lpt_start_commit(c);
	if (err)
		goto out_up;
	err = ubifs_orphan_start_commit(c);
	if (err)
		goto out_up;

	/* Snapshot LEB properties statistics for the new master node */
	ubifs_get_lp_stats(c, &lst);

	/* Journal may operate again - "commit end" does the bulk of the I/O */
	up_write(&c->commit_sem);

	err = ubifs_tnc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_end_commit(c);
	if (err)
		goto out;
	err = ubifs_orphan_end_commit(c);
	if (err)
		goto out;
	old_ltail_lnum = c->ltail_lnum;
	err = ubifs_log_end_commit(c, new_ltail_lnum);
	if (err)
		goto out;
	err = dbg_check_old_index(c, &zroot);
	if (err)
		goto out;

	/*
	 * Write the new master node, which makes the just-committed index and
	 * LPT the authoritative on-flash state.
	 */
	mutex_lock(&c->mst_mutex);
	c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
	c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
	c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
	c->mst_node->root_offs = cpu_to_le32(zroot.offs);
	c->mst_node->root_len = cpu_to_le32(zroot.len);
	c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum);
	c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs);
	c->mst_node->index_size = cpu_to_le64(c->old_idx_sz);
	c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum);
	c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs);
	c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum);
	c->mst_node->nhead_offs = cpu_to_le32(c->nhead_offs);
	c->mst_node->ltab_lnum = cpu_to_le32(c->ltab_lnum);
	c->mst_node->ltab_offs = cpu_to_le32(c->ltab_offs);
	c->mst_node->lsave_lnum = cpu_to_le32(c->lsave_lnum);
	c->mst_node->lsave_offs = cpu_to_le32(c->lsave_offs);
	c->mst_node->lscan_lnum = cpu_to_le32(c->lscan_lnum);
	c->mst_node->empty_lebs = cpu_to_le32(lst.empty_lebs);
	c->mst_node->idx_lebs = cpu_to_le32(lst.idx_lebs);
	c->mst_node->total_free = cpu_to_le64(lst.total_free);
	c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty);
	c->mst_node->total_used = cpu_to_le64(lst.total_used);
	c->mst_node->total_dead = cpu_to_le64(lst.total_dead);
	c->mst_node->total_dark = cpu_to_le64(lst.total_dark);
	if (c->no_orphs)
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
	else
		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
	err = ubifs_write_master(c);
	mutex_unlock(&c->mst_mutex);
	if (err)
		goto out;

	/* Post-commit clean-up: free up the space of the old log/index/LPT */
	err = ubifs_log_post_commit(c, old_ltail_lnum);
	if (err)
		goto out;
	err = ubifs_gc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_post_commit(c);
	if (err)
		goto out;

	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_RESTING;
	wake_up(&c->cmt_wq);
	dbg_cmt("commit end");
	spin_unlock(&c->cs_lock);

	return 0;

out_up:
	up_write(&c->commit_sem);
out:
	ubifs_err("commit failed, error %d", err);
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_BROKEN;
	wake_up(&c->cmt_wq);
	spin_unlock(&c->cs_lock);
	ubifs_ro_mode(c, err);
	return err;
}
184
185/**
186 * run_bg_commit - run background commit if it is needed.
187 * @c: UBIFS file-system description object
188 *
189 * This function runs background commit if it is needed. Returns zero in case
190 * of success and a negative error code in case of failure.
191 */
192static int run_bg_commit(struct ubifs_info *c)
193{
194 spin_lock(&c->cs_lock);
195 /*
196 * Run background commit only if background commit was requested or if
197 * commit is required.
198 */
199 if (c->cmt_state != COMMIT_BACKGROUND &&
200 c->cmt_state != COMMIT_REQUIRED)
201 goto out;
202 spin_unlock(&c->cs_lock);
203
204 down_write(&c->commit_sem);
205 spin_lock(&c->cs_lock);
206 if (c->cmt_state == COMMIT_REQUIRED)
207 c->cmt_state = COMMIT_RUNNING_REQUIRED;
208 else if (c->cmt_state == COMMIT_BACKGROUND)
209 c->cmt_state = COMMIT_RUNNING_BACKGROUND;
210 else
211 goto out_cmt_unlock;
212 spin_unlock(&c->cs_lock);
213
214 return do_commit(c);
215
216out_cmt_unlock:
217 up_write(&c->commit_sem);
218out:
219 spin_unlock(&c->cs_lock);
220 return 0;
221}
222
/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements various file-system background activities:
 * o when a write-buffer timer expires it synchronizes the appropriate
 *   write-buffer;
 * o when the journal is about to be full, it starts in-advance commit.
 *
 * Note, other stuff like background garbage collection may be added here in
 * future.
 */
int ubifs_bg_thread(void *info)
{
	int err;
	struct ubifs_info *c = info;

	dbg_msg("background thread \"%s\" started, PID %d",
		c->bgt_name, current->pid);
	set_freezable();

	while (1) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		/*
		 * Mark ourselves interruptible *before* testing the condition,
		 * so a wake-up arriving between the test and 'schedule()'
		 * sets us back to running instead of being lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		/* Check if there is something to do */
		if (!c->need_bgt) {
			/*
			 * Nothing prevents us from going sleep now and
			 * be never woken up and block the task which
			 * could wait in 'kthread_stop()' forever.
			 */
			if (kthread_should_stop())
				break;
			schedule();
			continue;
		} else
			__set_current_state(TASK_RUNNING);

		c->need_bgt = 0;
		/* Sync dirty write-buffers; a failure forces read-only mode */
		err = ubifs_bg_wbufs_sync(c);
		if (err)
			ubifs_ro_mode(c, err);

		run_bg_commit(c);
		cond_resched();
	}

	dbg_msg("background thread \"%s\" stops", c->bgt_name);
	return 0;
}
278
279/**
280 * ubifs_commit_required - set commit state to "required".
281 * @c: UBIFS file-system description object
282 *
283 * This function is called if a commit is required but cannot be done from the
284 * calling function, so it is just flagged instead.
285 */
286void ubifs_commit_required(struct ubifs_info *c)
287{
288 spin_lock(&c->cs_lock);
289 switch (c->cmt_state) {
290 case COMMIT_RESTING:
291 case COMMIT_BACKGROUND:
292 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
293 dbg_cstate(COMMIT_REQUIRED));
294 c->cmt_state = COMMIT_REQUIRED;
295 break;
296 case COMMIT_RUNNING_BACKGROUND:
297 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
298 dbg_cstate(COMMIT_RUNNING_REQUIRED));
299 c->cmt_state = COMMIT_RUNNING_REQUIRED;
300 break;
301 case COMMIT_REQUIRED:
302 case COMMIT_RUNNING_REQUIRED:
303 case COMMIT_BROKEN:
304 break;
305 }
306 spin_unlock(&c->cs_lock);
307}
308
309/**
310 * ubifs_request_bg_commit - notify the background thread to do a commit.
311 * @c: UBIFS file-system description object
312 *
313 * This function is called if the journal is full enough to make a commit
314 * worthwhile, so background thread is kicked to start it.
315 */
316void ubifs_request_bg_commit(struct ubifs_info *c)
317{
318 spin_lock(&c->cs_lock);
319 if (c->cmt_state == COMMIT_RESTING) {
320 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
321 dbg_cstate(COMMIT_BACKGROUND));
322 c->cmt_state = COMMIT_BACKGROUND;
323 spin_unlock(&c->cs_lock);
324 ubifs_wake_up_bgt(c);
325 } else
326 spin_unlock(&c->cs_lock);
327}
328
329/**
330 * wait_for_commit - wait for commit.
331 * @c: UBIFS file-system description object
332 *
333 * This function sleeps until the commit operation is no longer running.
334 */
335static int wait_for_commit(struct ubifs_info *c)
336{
337 dbg_cmt("pid %d goes sleep", current->pid);
338
339 /*
340 * The following sleeps if the condition is false, and will be woken
341 * when the commit ends. It is possible, although very unlikely, that we
342 * will wake up and see the subsequent commit running, rather than the
343 * one we were waiting for, and go back to sleep. However, we will be
344 * woken again, so there is no danger of sleeping forever.
345 */
346 wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND &&
347 c->cmt_state != COMMIT_RUNNING_REQUIRED);
348 dbg_cmt("commit finished, pid %d woke up", current->pid);
349 return 0;
350}
351
/**
 * ubifs_run_commit - run or wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function runs commit and returns zero in case of success and a negative
 * error code in case of failure.
 *
 * If a commit is already running, this function upgrades it to "running
 * required" and waits for it instead of starting a second one.
 */
int ubifs_run_commit(struct ubifs_info *c)
{
	int err = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EINVAL;
		goto out;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		/*
		 * We set the commit state to 'running required' to indicate
		 * that we want it to complete as quickly as possible.
		 */
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	spin_unlock(&c->cs_lock);

	/* Ok, the commit is indeed needed */

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Since we unlocked 'c->cs_lock', the state may have changed, so
	 * re-check it.
	 */
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EINVAL;
		goto out_cmt_unlock;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		/* Somebody else started a commit meanwhile - wait for it */
		up_write(&c->commit_sem);
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	/* We do the commit ourselves; do_commit() releases 'commit_sem' */
	c->cmt_state = COMMIT_RUNNING_REQUIRED;
	spin_unlock(&c->cs_lock);

	err = do_commit(c);
	return err;

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return err;
}
415
416/**
417 * ubifs_gc_should_commit - determine if it is time for GC to run commit.
418 * @c: UBIFS file-system description object
419 *
420 * This function is called by garbage collection to determine if commit should
421 * be run. If commit state is @COMMIT_BACKGROUND, which means that the journal
422 * is full enough to start commit, this function returns true. It is not
423 * absolutely necessary to commit yet, but it feels like this should be better
424 * then to keep doing GC. This function returns %1 if GC has to initiate commit
425 * and %0 if not.
426 */
427int ubifs_gc_should_commit(struct ubifs_info *c)
428{
429 int ret = 0;
430
431 spin_lock(&c->cs_lock);
432 if (c->cmt_state == COMMIT_BACKGROUND) {
433 dbg_cmt("commit required now");
434 c->cmt_state = COMMIT_REQUIRED;
435 } else
436 dbg_cmt("commit not requested");
437 if (c->cmt_state == COMMIT_REQUIRED)
438 ret = 1;
439 spin_unlock(&c->cs_lock);
440 return ret;
441}
442
443#ifdef CONFIG_UBIFS_FS_DEBUG
444
/**
 * struct idx_node - hold index nodes during index tree traversal.
 * @list: list (links the nodes on the current root-to-leaf path)
 * @iip: index in parent (slot number of this indexing node in the parent
 *       indexing node)
 * @upper_key: all keys in this indexing node have to be less or equivalent to
 *             this key
 * @idx: index node (8-byte aligned because all node structures must be 8-byte
 *       aligned)
 */
struct idx_node {
	struct list_head list;
	int iip;
	union ubifs_key upper_key;
	struct ubifs_idx_node idx __attribute__((aligned(8)));
};
461
462/**
463 * dbg_old_index_check_init - get information for the next old index check.
464 * @c: UBIFS file-system description object
465 * @zroot: root of the index
466 *
467 * This function records information about the index that will be needed for the
468 * next old index check i.e. 'dbg_check_old_index()'.
469 *
470 * This function returns %0 on success and a negative error code on failure.
471 */
472int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot)
473{
474 struct ubifs_idx_node *idx;
475 int lnum, offs, len, err = 0;
Artem Bityutskiy17c2f9f2008-10-17 13:31:39 +0300476 struct ubifs_debug_info *d = c->dbg;
Artem Bityutskiy1e517642008-07-14 19:08:37 +0300477
Artem Bityutskiy17c2f9f2008-10-17 13:31:39 +0300478 d->old_zroot = *zroot;
479 lnum = d->old_zroot.lnum;
480 offs = d->old_zroot.offs;
481 len = d->old_zroot.len;
Artem Bityutskiy1e517642008-07-14 19:08:37 +0300482
483 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
484 if (!idx)
485 return -ENOMEM;
486
487 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
488 if (err)
489 goto out;
490
Artem Bityutskiy17c2f9f2008-10-17 13:31:39 +0300491 d->old_zroot_level = le16_to_cpu(idx->level);
492 d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);
Artem Bityutskiy1e517642008-07-14 19:08:37 +0300493out:
494 kfree(idx);
495 return err;
496}
497
/**
 * dbg_check_old_index - check the old copy of the index.
 * @c: UBIFS file-system description object
 * @zroot: root of the new index
 *
 * In order to be able to recover from an unclean unmount, a complete copy of
 * the index must exist on flash. This is the "old" index. The commit process
 * must write the "new" index to flash without overwriting or destroying any
 * part of the old index. This function is run at commit end in order to check
 * that the old index does indeed exist completely intact.
 *
 * This function returns %0 on success and a negative error code on failure.
 * Internally, positive error codes (1-7) identify the specific consistency
 * check that failed; they are reported and converted to %-EINVAL on return.
 */
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
	int first = 1, iip;
	struct ubifs_debug_info *d = c->dbg;
	union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
	unsigned long long uninitialized_var(last_sqnum);
	struct ubifs_idx_node *idx;
	struct list_head list;
	struct idx_node *i;
	size_t sz;

	/* This check is optional - skip it unless explicitly enabled */
	if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
		goto out;

	/* 'list' acts as an explicit stack of the current root-to-leaf path */
	INIT_LIST_HEAD(&list);

	/* Allocation size: 'struct idx_node' plus a full-fanout index node */
	sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) -
	     UBIFS_IDX_NODE_SZ;

	/* Start at the old zroot */
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;
	iip = 0;

	/*
	 * Traverse the index tree preorder depth-first i.e. do a node and then
	 * its subtrees from left to right.
	 */
	while (1) {
		struct ubifs_branch *br;

		/* Get the next index node */
		i = kmalloc(sz, GFP_NOFS);
		if (!i) {
			err = -ENOMEM;
			goto out_free;
		}
		i->iip = iip;
		/* Keep the index nodes on our path in a linked list */
		list_add_tail(&i->list, &list);
		/* Read the index node */
		idx = &i->idx;
		err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
		if (err)
			goto out_free;
		/* Validate index node */
		child_cnt = le16_to_cpu(idx->child_cnt);
		if (child_cnt < 1 || child_cnt > c->fanout) {
			err = 1;
			goto out_dump;
		}
		if (first) {
			first = 0;
			/* Check root level and sqnum */
			if (le16_to_cpu(idx->level) != d->old_zroot_level) {
				err = 2;
				goto out_dump;
			}
			if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) {
				err = 3;
				goto out_dump;
			}
			/* Set last values as though root had a parent */
			last_level = le16_to_cpu(idx->level) + 1;
			last_sqnum = le64_to_cpu(idx->ch.sqnum) + 1;
			key_read(c, ubifs_idx_key(c, idx), &lower_key);
			highest_ino_key(c, &upper_key, INUM_WATERMARK);
		}
		key_copy(c, &upper_key, &i->upper_key);
		/*
		 * Each level must be exactly one less than the parent's.
		 * NOTE(review): error code 3 is shared with the root-sqnum
		 * check above, so a reported "error 3" is ambiguous.
		 */
		if (le16_to_cpu(idx->level) != last_level - 1) {
			err = 3;
			goto out_dump;
		}
		/*
		 * The index is always written bottom up hence a child's sqnum
		 * is always less than the parent's.
		 */
		if (le64_to_cpu(idx->ch.sqnum) >= last_sqnum) {
			err = 4;
			goto out_dump;
		}
		/* Check key range */
		key_read(c, ubifs_idx_key(c, idx), &l_key);
		br = ubifs_idx_branch(c, idx, child_cnt - 1);
		key_read(c, &br->key, &u_key);
		if (keys_cmp(c, &lower_key, &l_key) > 0) {
			err = 5;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) < 0) {
			err = 6;
			goto out_dump;
		}
		/* Equal boundary keys are only permitted for hashed keys */
		if (keys_cmp(c, &upper_key, &u_key) == 0)
			if (!is_hash_key(c, &u_key)) {
				err = 7;
				goto out_dump;
			}
		/* Go to next index node */
		if (le16_to_cpu(idx->level) == 0) {
			/* At the bottom, so go up until can go right */
			while (1) {
				/* Drop the bottom of the list */
				list_del(&i->list);
				kfree(i);
				/* No more list means we are done */
				if (list_empty(&list))
					goto out;
				/* Look at the new bottom */
				i = list_entry(list.prev, struct idx_node,
					       list);
				idx = &i->idx;
				/* Can we go right */
				if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
					iip = iip + 1;
					break;
				} else
					/* Nope, so go up again */
					iip = i->iip;
			}
		} else
			/* Go down left */
			iip = 0;
		/*
		 * We have the parent in 'idx' and now we set up for reading the
		 * child pointed to by slot 'iip'.
		 */
		last_level = le16_to_cpu(idx->level);
		last_sqnum = le64_to_cpu(idx->ch.sqnum);
		br = ubifs_idx_branch(c, idx, iip);
		lnum = le32_to_cpu(br->lnum);
		offs = le32_to_cpu(br->offs);
		len = le32_to_cpu(br->len);
		key_read(c, &br->key, &lower_key);
		if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
			br = ubifs_idx_branch(c, idx, iip + 1);
			key_read(c, &br->key, &upper_key);
		} else
			key_copy(c, &i->upper_key, &upper_key);
	}
out:
	/* Record the new index root for the *next* old-index check */
	err = dbg_old_index_check_init(c, zroot);
	if (err)
		goto out_free;

	return 0;

out_dump:
	dbg_err("dumping index node (iip=%d)", i->iip);
	dbg_dump_node(c, idx);
	list_del(&i->list);
	kfree(i);
	if (!list_empty(&list)) {
		i = list_entry(list.prev, struct idx_node, list);
		dbg_err("dumping parent index node");
		dbg_dump_node(c, &i->idx);
	}
out_free:
	/* Free whatever remains of the traversal path */
	while (!list_empty(&list)) {
		i = list_entry(list.next, struct idx_node, list);
		list_del(&i->list);
		kfree(i);
	}
	ubifs_err("failed, error %d", err);
	if (err > 0)
		err = -EINVAL;
	return err;
}
681
682#endif /* CONFIG_UBIFS_FS_DEBUG */