/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions needed to recover from unclean un-mounts.
 * When UBIFS is mounted, it checks a flag on the master node to determine if
 * an un-mount was completed successfully. If not, the process of mounting
 * incorporates additional checking and fixing of on-flash data structures.
 * UBIFS always cleans away all remnants of an unclean un-mount, so that
 * errors do not accumulate. However, UBIFS defers recovery if it is mounted
 * read-only, and the flash is not modified in that case.
 *
 * The general UBIFS approach to recovery is that it recovers from
 * corruptions which could be caused by power cuts, but it refuses to recover
 * from corruptions caused by other reasons. UBIFS tries to distinguish
 * between these two causes of corruption, silently recovering in the former
 * case and loudly complaining in the latter case.
 *
 * UBIFS writes only to erased LEBs, so it writes only to the flash space
 * containing only 0xFFs. UBIFS also always writes strictly from the beginning
 * of the LEB to the end. And UBIFS assumes that the underlying flash media
 * writes in @c->max_write_size bytes at a time.
 *
 * Hence, if UBIFS finds a corrupted node at offset X, it expects only the min.
 * I/O unit corresponding to offset X to contain corrupted data; all the
 * following min. I/O units have to contain empty space (all 0xFFs). If this is
 * not true, the corruption cannot be the result of a power cut, and UBIFS
 * refuses to mount.
 */
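
/*
 * To illustrate the pattern the recovery code below looks for (a rough sketch
 * under the assumptions stated in the comment above, not a function used by
 * this file): a corruption at @corrupt_offs may be attributed to a power cut
 * only if everything after the next @c->max_write_size boundary is still
 * erased, which mirrors the check done by 'is_last_write()' below:
 *
 *	static int power_cut_like(const uint8_t *leb, int leb_size,
 *				  int corrupt_offs, int max_write_size)
 *	{
 *		int i, empty_offs = ALIGN(corrupt_offs + 1, max_write_size);
 *
 *		for (i = empty_offs; i < leb_size; i++)
 *			if (leb[i] != 0xff)
 *				return 0;
 *		return 1;
 *	}
 *
 * Anything else is treated as "real" corruption and mounting is refused.
 */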

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * is_empty - determine whether a buffer is empty (contains all 0xff).
 * @buf: buffer to check
 * @len: length of buffer
 *
 * This function returns %1 if the buffer is empty (contains all 0xff),
 * otherwise %0 is returned.
 */
static int is_empty(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return 0;
	return 1;
}

/**
 * first_non_ff - find offset of the first non-0xff byte.
 * @buf: buffer to search in
 * @len: length of buffer
 *
 * This function returns offset of the first non-0xff byte in @buf or %-1 if
 * the buffer contains only 0xff bytes.
 */
static int first_non_ff(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return i;
	return -1;
}

/**
 * get_master_node - get the last valid master node allowing for corruption.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @pbuf: buffer containing the LEB read, is returned here
 * @mst: master node, if found, is returned here
 * @cor: corruption, if found, is returned here
 *
 * This function allocates a buffer, reads the LEB into it, and finds and
 * returns the last valid master node allowing for one area of corruption.
 * The corrupt area, if there is one, must be consistent with the assumption
 * that it is the result of an unclean unmount while the master node was being
 * written. Under those circumstances, it is valid to use the previously written
 * master node.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
			   struct ubifs_mst_node **mst, void **cor)
{
	const int sz = c->mst_node_alsz;
	int err, offs, len;
	void *sbuf, *buf;

	sbuf = vmalloc(c->leb_size);
	if (!sbuf)
		return -ENOMEM;

	err = ubi_read(c->ubi, lnum, sbuf, 0, c->leb_size);
	if (err && err != -EBADMSG)
		goto out_free;

	/* Find the first position that is definitely not a node */
	offs = 0;
	buf = sbuf;
	len = c->leb_size;
	while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {
		struct ubifs_ch *ch = buf;

		if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
			break;
		offs += sz;
		buf += sz;
		len -= sz;
	}
	/* See if there was a valid master node before that */
	if (offs) {
		int ret;

		offs -= sz;
		buf -= sz;
		len += sz;
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret != SCANNED_A_NODE && offs) {
			/* Could have been corruption so check one place back */
			offs -= sz;
			buf -= sz;
			len += sz;
			ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
			if (ret != SCANNED_A_NODE)
				/*
				 * We accept only one area of corruption because
				 * we are assuming that it was caused while
				 * trying to write a master node.
				 */
				goto out_err;
		}
		if (ret == SCANNED_A_NODE) {
			struct ubifs_ch *ch = buf;

			if (ch->node_type != UBIFS_MST_NODE)
				goto out_err;
			dbg_rcvry("found a master node at %d:%d", lnum, offs);
			*mst = buf;
			offs += sz;
			buf += sz;
			len -= sz;
		}
	}
	/* Check for corruption */
	if (offs < c->leb_size) {
		if (!is_empty(buf, min_t(int, len, sz))) {
			*cor = buf;
			dbg_rcvry("found corruption at %d:%d", lnum, offs);
		}
		offs += sz;
		buf += sz;
		len -= sz;
	}
	/* Check remaining empty space */
	if (offs < c->leb_size)
		if (!is_empty(buf, len))
			goto out_err;
	*pbuf = sbuf;
	return 0;

out_err:
	err = -EINVAL;
out_free:
	vfree(sbuf);
	*mst = NULL;
	*cor = NULL;
	return err;
}
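
/*
 * A sketch of the LEB layouts get_master_node() accepts (illustrative only):
 *
 *	| mst | mst | ... | mst | 0xFF ............... |  (no corruption)
 *	| mst | mst | ... | mst | garbage | 0xFF ..... |  (power cut while the
 *	                                                   next master node was
 *	                                                   being written)
 *
 * where each master node occupies @c->mst_node_alsz bytes. At most one
 * corrupt area directly after the last good master node is tolerated; any
 * other layout makes the function fail with %-EINVAL.
 */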

/**
 * write_rcvrd_mst_node - write recovered master node.
 * @c: UBIFS file-system description object
 * @mst: master node
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int write_rcvrd_mst_node(struct ubifs_info *c,
				struct ubifs_mst_node *mst)
{
	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
	__le32 save_flags;

	dbg_rcvry("recovery");

	save_flags = mst->flags;
	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);

	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
	err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
	if (err)
		goto out;
	err = ubi_leb_change(c->ubi, lnum + 1, mst, sz, UBI_SHORTTERM);
	if (err)
		goto out;
out:
	mst->flags = save_flags;
	return err;
}

/**
 * ubifs_recover_master_node - recover the master node.
 * @c: UBIFS file-system description object
 *
 * This function recovers the master node from corruption that may occur due to
 * an unclean unmount.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_master_node(struct ubifs_info *c)
{
	void *buf1 = NULL, *buf2 = NULL, *cor1 = NULL, *cor2 = NULL;
	struct ubifs_mst_node *mst1 = NULL, *mst2 = NULL, *mst;
	const int sz = c->mst_node_alsz;
	int err, offs1, offs2;

	dbg_rcvry("recovery");

	err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1);
	if (err)
		goto out_free;

	err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2);
	if (err)
		goto out_free;

	if (mst1) {
		offs1 = (void *)mst1 - buf1;
		if ((le32_to_cpu(mst1->flags) & UBIFS_MST_RCVRY) &&
		    (offs1 == 0 && !cor1)) {
			/*
			 * mst1 was written by recovery at offset 0 with no
			 * corruption.
			 */
			dbg_rcvry("recovery recovery");
			mst = mst1;
		} else if (mst2) {
			offs2 = (void *)mst2 - buf2;
			if (offs1 == offs2) {
				/* Same offset, so must be the same */
				if (memcmp((void *)mst1 + UBIFS_CH_SZ,
					   (void *)mst2 + UBIFS_CH_SZ,
					   UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
					goto out_err;
				mst = mst1;
			} else if (offs2 + sz == offs1) {
				/* 1st LEB was written, 2nd was not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else if (offs1 == 0 && offs2 + sz >= c->leb_size) {
				/* 1st LEB was unmapped and written, 2nd not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else
				goto out_err;
		} else {
			/*
			 * 2nd LEB was unmapped and about to be written, so
			 * there must be only one master node in the first LEB
			 * and no corruption.
			 */
			if (offs1 != 0 || cor1)
				goto out_err;
			mst = mst1;
		}
	} else {
		if (!mst2)
			goto out_err;
		/*
		 * 1st LEB was unmapped and about to be written, so there must
		 * be no room left in 2nd LEB.
		 */
		offs2 = (void *)mst2 - buf2;
		if (offs2 + sz + sz <= c->leb_size)
			goto out_err;
		mst = mst2;
	}

	ubifs_msg("recovered master node from LEB %d",
		  (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));

	memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);

	if (c->ro_mount) {
		/* Read-only mode. Keep a copy for switching to rw mode */
		c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
		if (!c->rcvrd_mst_node) {
			err = -ENOMEM;
			goto out_free;
		}
		memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);

		/*
		 * We had to recover the master node, which means there was an
		 * unclean reboot. However, it is possible that the master node
		 * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
		 * E.g., consider the following chain of events:
		 *
		 * 1. UBIFS was cleanly unmounted, so the master node is clean
		 * 2. UBIFS is being mounted R/W and starts changing the master
		 *    node in the first LEB (%UBIFS_MST_LNUM). A power cut
		 *    happens, so this LEB ends up with some amount of garbage
		 *    at the end.
		 * 3. UBIFS is being mounted R/O. We reach this place and
		 *    recover the master node from the second LEB
		 *    (%UBIFS_MST_LNUM + 1). But we cannot update the media
		 *    because we are being mounted R/O. We have to defer the
		 *    operation.
		 * 4. However, this master node (@c->mst_node) is marked as
		 *    clean (since step 1). And if we just return, the mount
		 *    code will be confused and won't recover the master node
		 *    when it is re-mounted R/W later.
		 *
		 * Thus, we force the recovery by marking the master node as
		 * dirty here.
		 */
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	} else {
		/* Write the recovered master node */
		c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
		err = write_rcvrd_mst_node(c, c->mst_node);
		if (err)
			goto out_free;
	}

	vfree(buf2);
	vfree(buf1);

	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err("failed to recover master node");
	if (mst1) {
		dbg_err("dumping first master node");
		dbg_dump_node(c, mst1);
	}
	if (mst2) {
		dbg_err("dumping second master node");
		dbg_dump_node(c, mst2);
	}
	vfree(buf2);
	vfree(buf1);
	return err;
}

/**
 * ubifs_write_rcvrd_mst_node - write the recovered master node.
 * @c: UBIFS file-system description object
 *
 * This function writes the master node that was recovered during mounting in
 * read-only mode and must now be written because we are remounting rw.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c)
{
	int err;

	if (!c->rcvrd_mst_node)
		return 0;
	c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	err = write_rcvrd_mst_node(c, c->rcvrd_mst_node);
	if (err)
		return err;
	kfree(c->rcvrd_mst_node);
	c->rcvrd_mst_node = NULL;
	return 0;
}

/**
 * is_last_write - determine if an offset was in the last write to a LEB.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @offs: offset to check
 *
 * This function returns %1 if @offs was in the last write to the LEB whose data
 * is in @buf, otherwise %0 is returned. The determination is made by checking
 * for subsequent empty space starting from the next @c->max_write_size
 * boundary.
 */
static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
{
	int empty_offs, check_len;
	uint8_t *p;

	/*
	 * Round up to the next @c->max_write_size boundary, i.e., @offs is in
	 * the last wbuf written. After that there should be empty space.
	 */
	empty_offs = ALIGN(offs + 1, c->max_write_size);
	check_len = c->leb_size - empty_offs;
	p = buf + empty_offs - offs;
	return is_empty(p, check_len);
}
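
/*
 * A worked example of the check above (numbers are illustrative, not taken
 * from a real device): with @c->max_write_size of 2048 bytes and garbage
 * reported at @offs 5000, ALIGN(5001, 2048) is 6144, so bytes 6144 up to
 * @c->leb_size - 1 must all be 0xFF for @offs to count as part of the last,
 * interrupted write.
 */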

/**
 * clean_buf - clean the data from an LEB sitting in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to clean
 * @lnum: LEB number to clean
 * @offs: offset from which to clean
 * @len: length of buffer
 *
 * This function pads up to the next min_io_size boundary (if there is one) and
 * sets empty space to all 0xff. @buf, @offs and @len are updated to the next
 * @c->min_io_size boundary.
 */
static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
		      int *offs, int *len)
{
	int empty_offs, pad_len;

	lnum = lnum;
	dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);

	ubifs_assert(!(*offs & 7));
	empty_offs = ALIGN(*offs, c->min_io_size);
	pad_len = empty_offs - *offs;
	ubifs_pad(c, *buf, pad_len);
	*offs += pad_len;
	*buf += pad_len;
	*len -= pad_len;
	memset(*buf, 0xff, c->leb_size - empty_offs);
}

/**
 * no_more_nodes - determine if there are no more nodes in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @len: length of buffer
 * @lnum: LEB number of the LEB from which @buf was read
 * @offs: offset from which @buf was read
 *
 * This function ensures that the corrupted node at @offs is the last thing
 * written to a LEB. This function returns %1 if more data is not found and
 * %0 if more data is found.
 */
static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
			int lnum, int offs)
{
	struct ubifs_ch *ch = buf;
	int skip, dlen = le32_to_cpu(ch->len);

	/* Check for empty space after the corrupt node's common header */
	skip = ALIGN(offs + UBIFS_CH_SZ, c->max_write_size) - offs;
	if (is_empty(buf + skip, len - skip))
		return 1;
	/*
	 * The area after the common header size is not empty, so the common
	 * header must be intact. Check it.
	 */
	if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) {
		dbg_rcvry("unexpected bad common header at %d:%d", lnum, offs);
		return 0;
	}
	/* Now we know the corrupt node's length we can skip over it */
	skip = ALIGN(offs + dlen, c->max_write_size) - offs;
	/* After which there should be empty space */
	if (is_empty(buf + skip, len - skip))
		return 1;
	dbg_rcvry("unexpected data at %d:%d", lnum, offs + skip);
	return 0;
}

/**
 * fix_unclean_leb - fix an unclean LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB information
 * @start: offset where scan started
 */
static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
			   int start)
{
	int lnum = sleb->lnum, endpt = start;

	/* Get the end offset of the last node we are keeping */
	if (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;

		snod = list_entry(sleb->nodes.prev,
				  struct ubifs_scan_node, list);
		endpt = snod->offs + snod->len;
	}

	if (c->ro_mount && !c->remounting_rw) {
		/* Add to recovery list */
		struct ubifs_unclean_leb *ucleb;

		dbg_rcvry("need to fix LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		ucleb = kzalloc(sizeof(struct ubifs_unclean_leb), GFP_NOFS);
		if (!ucleb)
			return -ENOMEM;
		ucleb->lnum = lnum;
		ucleb->endpt = endpt;
		list_add_tail(&ucleb->list, &c->unclean_leb_list);
	} else {
		/* Write the fixed LEB back to flash */
		int err;

		dbg_rcvry("fixing LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		if (endpt == 0) {
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		} else {
			int len = ALIGN(endpt, c->min_io_size);

			if (start) {
				err = ubi_read(c->ubi, lnum, sleb->buf, 0,
					       start);
				if (err)
					return err;
			}
			/* Pad to min_io_size */
			if (len > endpt) {
				int pad_len = len - ALIGN(endpt, 8);

				if (pad_len > 0) {
					void *buf = sleb->buf + len - pad_len;

					ubifs_pad(c, buf, pad_len);
				}
			}
			err = ubi_leb_change(c->ubi, lnum, sleb->buf, len,
					     UBI_UNKNOWN);
			if (err)
				return err;
		}
	}
	return 0;
}

/**
 * drop_last_group - drop the last group of nodes.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * group of nodes of the scanned LEB.
 */
static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
{
	while (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;
		struct ubifs_ch *ch;

		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);
		ch = snod->node;
		if (ch->group_type != UBIFS_IN_NODE_GROUP)
			break;

		dbg_rcvry("dropping grouped node at %d:%d",
			  sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
	}
}

/**
 * drop_last_node - drop the last node.
 * @sleb: scanned LEB information
 * @offs: offset of the dropped node is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * node of the scanned LEB.
 */
static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
{
	struct ubifs_scan_node *snod;

	if (!list_empty(&sleb->nodes)) {
		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);

		dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
	}
}

/**
 * ubifs_recover_leb - scan and recover a LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
 *         belong to any journal head)
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by the unclean unmount from which we are attempting to recover.
 * Returns the scanned information in case of success, and an error pointer
 * (%-EUCLEAN if an unrecoverable corruption is found, or another negative
 * error code) in case of failure.
 */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
					 int offs, void *sbuf, int jhead)
{
	int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
	int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
	struct ubifs_scan_leb *sleb;
	void *buf = sbuf + offs;

	dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);

	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
	if (IS_ERR(sleb))
		return sleb;

	ubifs_assert(len >= 8);
	while (len >= 8) {
		dbg_scan("look at LEB %d:%d (%d bytes left)",
			 lnum, offs, len);

		cond_resched();

		/*
		 * Scan quietly until there is an error from which we cannot
		 * recover
		 */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			err = ubifs_add_snod(c, sleb, buf, offs);
			if (err)
				goto error;
			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
		} else if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
		} else if (ret == SCANNED_EMPTY_SPACE ||
			   ret == SCANNED_GARBAGE ||
			   ret == SCANNED_A_BAD_PAD_NODE ||
			   ret == SCANNED_A_CORRUPT_NODE) {
			dbg_rcvry("found corruption - %d", ret);
			break;
		} else {
			dbg_err("unexpected return value %d", ret);
			err = -EINVAL;
			goto error;
		}
	}

	if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
		if (!is_last_write(c, buf, offs))
			goto corrupted_rescan;
	} else if (ret == SCANNED_A_CORRUPT_NODE) {
		if (!no_more_nodes(c, buf, len, lnum, offs))
			goto corrupted_rescan;
	} else if (!is_empty(buf, len)) {
		if (!is_last_write(c, buf, offs)) {
			int corruption = first_non_ff(buf, len);

			/*
			 * See header comment for this file for more
			 * explanations about the reasons we have this check.
			 */
			ubifs_err("corrupt empty space LEB %d:%d, corruption "
				  "starts at %d", lnum, offs, corruption);
			/* Make sure we dump interesting non-0xFF data */
			offs += corruption;
			buf += corruption;
			goto corrupted;
		}
	}

	min_io_unit = round_down(offs, c->min_io_size);
	if (grouped)
		/*
		 * If nodes are grouped, always drop the incomplete group at
		 * the end.
		 */
		drop_last_group(sleb, &offs);

	if (jhead == GCHD) {
		/*
		 * If this LEB belongs to the GC head then while we are in the
		 * middle of the same min. I/O unit keep dropping nodes. So
		 * basically, what we want is to make sure that the last min.
		 * I/O unit where we saw the corruption is dropped completely
		 * with all the uncorrupted nodes which may possibly sit there.
		 *
		 * In other words, let's name the min. I/O unit where the
		 * corruption starts B, and the previous min. I/O unit A. The
		 * below code tries to deal with a situation when half of B
		 * contains valid nodes or the end of a valid node, and the
		 * second half of B contains corrupted data or garbage. This
		 * means that UBIFS had been writing to B just before the power
		 * cut happened. I do not know how realistic this scenario is,
		 * that half of the min. I/O unit had been written successfully
		 * and the other half not, but this is possible in our 'failure
		 * mode emulation' infrastructure at least.
		 *
		 * So what is the problem, why do we need to drop those nodes?
		 * Why can't we just clean up the second half of B by putting a
		 * padding node there? We can, and this works fine with one
		 * exception which was reproduced with power cut emulation
		 * testing and happens extremely rarely.
		 *
		 * Imagine the file-system is full, we run GC which starts
		 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
		 * the current GC head LEB). The @c->gc_lnum is -1, which means
		 * that GC will retain LEB X and will try to continue. Imagine
		 * that LEB X is currently the dirtiest LEB, and the amount of
		 * used space in LEB Y is exactly the same as amount of free
		 * space in LEB X.
		 *
		 * And a power cut happens when nodes are moved from LEB X to
		 * LEB Y. We are here trying to recover LEB Y which is the GC
		 * head LEB. We find the min. I/O unit B as described above.
		 * Then we clean up LEB Y by padding min. I/O unit B. And later
		 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
		 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
		 * does not match because the amount of valid nodes there does
		 * not fit the free space in LEB Y any more! And this is
		 * because of the padding node which we added to LEB Y. The
		 * user-visible effect of this which I once observed and
		 * analysed is that we cannot mount the file-system with
		 * -ENOSPC error.
		 *
		 * So obviously, to make sure that situation does not happen we
		 * should free min. I/O unit B in LEB Y completely and the last
		 * used min. I/O unit in LEB Y should be A. This is basically
		 * what the below code tries to do.
		 */
		while (offs > min_io_unit)
			drop_last_node(sleb, &offs);
	}

	buf = sbuf + offs;
	len = c->leb_size - offs;

	clean_buf(c, &buf, lnum, &offs, &len);
	ubifs_end_scan(c, sleb, lnum, offs);

	err = fix_unclean_leb(c, sleb, start);
	if (err)
		goto error;

	return sleb;

corrupted_rescan:
	/* Re-scan the corrupted data with verbose messages */
	dbg_err("corruption %d", ret);
	ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
corrupted:
	ubifs_scanned_corruption(c, lnum, offs, buf);
	err = -EUCLEAN;
error:
	ubifs_err("LEB %d scanning failed", lnum);
	ubifs_scan_destroy(sleb);
	return ERR_PTR(err);
}

/**
 * get_cs_sqnum - get commit start sequence number.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of commit start node
 * @offs: offset of commit start node
 * @cs_sqnum: commit start sequence number is returned here
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
			unsigned long long *cs_sqnum)
{
	struct ubifs_cs_node *cs_node = NULL;
	int err, ret;

	dbg_rcvry("at %d:%d", lnum, offs);
	cs_node = kmalloc(UBIFS_CS_NODE_SZ, GFP_KERNEL);
	if (!cs_node)
		return -ENOMEM;
	if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
		goto out_err;
	err = ubi_read(c->ubi, lnum, (void *)cs_node, offs, UBIFS_CS_NODE_SZ);
	if (err && err != -EBADMSG)
		goto out_free;
	ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
	if (ret != SCANNED_A_NODE) {
		dbg_err("Not a valid node");
		goto out_err;
	}
	if (cs_node->ch.node_type != UBIFS_CS_NODE) {
		dbg_err("Not a CS node, type is %d", cs_node->ch.node_type);
		goto out_err;
	}
	if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
		dbg_err("CS node cmt_no %llu != current cmt_no %llu",
			(unsigned long long)le64_to_cpu(cs_node->cmt_no),
			c->cmt_no);
		goto out_err;
	}
	*cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
	dbg_rcvry("commit start sqnum %llu", *cs_sqnum);
	kfree(cs_node);
	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err("failed to get CS sqnum");
	kfree(cs_node);
	return err;
}

/**
 * ubifs_recover_log_leb - scan and recover a log LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by unclean reboots from which we are attempting to recover
 * (assume that only the last log LEB can be corrupted by an unclean reboot).
 *
 * This function returns the scanned information on success and an error
 * pointer on failure.
 */
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
					     int offs, void *sbuf)
{
	struct ubifs_scan_leb *sleb;
	int next_lnum;

	dbg_rcvry("LEB %d", lnum);
	next_lnum = lnum + 1;
	if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs)
		next_lnum = UBIFS_LOG_LNUM;
	if (next_lnum != c->ltail_lnum) {
		/*
		 * We can only recover at the end of the log, so check that the
		 * next log LEB is empty or out of date.
		 */
		sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0);
		if (IS_ERR(sleb))
			return sleb;
		if (sleb->nodes_cnt) {
			struct ubifs_scan_node *snod;
			unsigned long long cs_sqnum = c->cs_sqnum;

			snod = list_entry(sleb->nodes.next,
					  struct ubifs_scan_node, list);
			if (cs_sqnum == 0) {
				int err;

				err = get_cs_sqnum(c, lnum, offs, &cs_sqnum);
				if (err) {
					ubifs_scan_destroy(sleb);
					return ERR_PTR(err);
				}
			}
			if (snod->sqnum > cs_sqnum) {
				ubifs_err("unrecoverable log corruption "
					  "in LEB %d", lnum);
				ubifs_scan_destroy(sleb);
				return ERR_PTR(-EUCLEAN);
			}
		}
		ubifs_scan_destroy(sleb);
	}
	return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
}

/**
 * recover_head - recover a head.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of head to recover
 * @offs: offset of head to recover
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at a head location.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int recover_head(const struct ubifs_info *c, int lnum, int offs,
			void *sbuf)
{
	int len = c->max_write_size, err;

	if (offs + len > c->leb_size)
		len = c->leb_size - offs;

	if (!len)
		return 0;

	/* Read at the head location and check it is empty flash */
	err = ubi_read(c->ubi, lnum, sbuf, offs, len);
	if (err || !is_empty(sbuf, len)) {
		dbg_rcvry("cleaning head at %d:%d", lnum, offs);
		if (offs == 0)
			return ubifs_leb_unmap(c, lnum);
		err = ubi_read(c->ubi, lnum, sbuf, 0, offs);
		if (err)
			return err;
		return ubi_leb_change(c->ubi, lnum, sbuf, offs, UBI_UNKNOWN);
	}

	return 0;
}

/**
 * ubifs_recover_inl_heads - recover index and LPT heads.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at the index and
 * LPT head locations.
 *
 * This deals with the recovery of a half-completed journal commit. UBIFS is
 * careful never to overwrite the last version of the index or the LPT. Because
 * the index and LPT are wandering trees, data from a half-completed commit will
 * not be referenced anywhere in UBIFS. The data will be either in LEBs that are
 * assumed to be empty and will be unmapped anyway before use, or in the index
 * and LPT heads.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
{
	int err;

	ubifs_assert(!c->ro_mount || c->remounting_rw);

	dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
	err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
	if (err)
		return err;

	dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
	err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
	if (err)
		return err;

	return 0;
}

/**
 * clean_an_unclean_leb - read and write a LEB to remove corruption.
 * @c: UBIFS file-system description object
 * @ucleb: unclean LEB information
 * @sbuf: LEB-sized buffer to use
 *
 * This function reads a LEB up to a point pre-determined by the mount recovery,
 * checks the nodes, and writes the result back to the flash, thereby cleaning
 * off any following corruption, or non-fatal ECC errors.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int clean_an_unclean_leb(const struct ubifs_info *c,
				struct ubifs_unclean_leb *ucleb, void *sbuf)
{
	int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
	void *buf = sbuf;

	dbg_rcvry("LEB %d len %d", lnum, len);

	if (len == 0) {
		/* Nothing to read, just unmap it */
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
		return 0;
	}

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	if (err && err != -EBADMSG)
		return err;

	while (len >= 8) {
		int ret;

		cond_resched();

		/* Scan quietly until there is an error */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);

		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
			continue;
		}

		if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
			continue;
		}

		if (ret == SCANNED_EMPTY_SPACE) {
			ubifs_err("unexpected empty space at %d:%d",
				  lnum, offs);
			return -EUCLEAN;
		}

		if (quiet) {
			/* Redo the last scan but noisily */
			quiet = 0;
			continue;
		}

		ubifs_scanned_corruption(c, lnum, offs, buf);
		return -EUCLEAN;
	}

	/* Pad to min_io_size */
	len = ALIGN(ucleb->endpt, c->min_io_size);
	if (len > ucleb->endpt) {
		int pad_len = len - ALIGN(ucleb->endpt, 8);

		if (pad_len > 0) {
			buf = c->sbuf + len - pad_len;
			ubifs_pad(c, buf, pad_len);
		}
	}

	/* Write back the LEB atomically */
	err = ubi_leb_change(c->ubi, lnum, sbuf, len, UBI_UNKNOWN);
	if (err)
		return err;

	dbg_rcvry("cleaned LEB %d", lnum);

	return 0;
}

/**
 * ubifs_clean_lebs - clean LEBs recovered during read-only mount.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function cleans LEBs identified during recovery that need to be written
 * but were not because UBIFS was mounted read-only. This happens when
 * remounting to read-write mode.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf)
{
	dbg_rcvry("recovery");
	while (!list_empty(&c->unclean_leb_list)) {
		struct ubifs_unclean_leb *ucleb;
		int err;

		ucleb = list_entry(c->unclean_leb_list.next,
				   struct ubifs_unclean_leb, list);
		err = clean_an_unclean_leb(c, ucleb, sbuf);
		if (err)
			return err;
		list_del(&ucleb->list);
		kfree(ucleb);
	}
	return 0;
}

/**
 * grab_empty_leb - grab an empty LEB to use as GC LEB and run commit.
 * @c: UBIFS file-system description object
 *
 * This is a helper function for 'ubifs_rcvry_gc_commit()' which grabs an empty
 * LEB to be used as GC LEB (@c->gc_lnum), and then runs the commit. Returns
 * zero in case of success and a negative error code in case of failure.
 */
static int grab_empty_leb(struct ubifs_info *c)
{
	int lnum, err;

	/*
	 * Note, it is very important to first search for an empty LEB and then
	 * run the commit, not vice-versa. The reason is that there might be
	 * only one empty LEB at the moment, the one which has been the
	 * @c->gc_lnum just before the power cut happened. During the regular
	 * UBIFS operation (not now) @c->gc_lnum is marked as "taken", so no
	 * one but GC can grab it. But at this moment this single empty LEB is
	 * not marked as taken, so if we run commit - what happens? Right, the
	 * commit will grab it and write the index there. Remember that the
	 * index always expands as long as there is free space, and it only
	 * starts consolidating when we run out of space.
	 *
	 * IOW, if we run commit now, we might not be able to find a free LEB
	 * after this.
	 */
	lnum = ubifs_find_free_leb_for_idx(c);
	if (lnum < 0) {
		dbg_err("could not find an empty LEB");
		dbg_dump_lprops(c);
		dbg_dump_budg(c, &c->bi);
		return lnum;
	}

	/* Reset the index flag */
	err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
				  LPROPS_INDEX, 0);
	if (err)
		return err;

	c->gc_lnum = lnum;
	dbg_rcvry("found empty LEB %d, run commit", lnum);

	return ubifs_run_commit(c);
}

/**
 * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit.
 * @c: UBIFS file-system description object
 *
 * Out-of-place garbage collection always requires one empty LEB with which to
 * start garbage collection. The LEB number is recorded in c->gc_lnum and is
 * written to the master node on unmounting. In the case of an unclean unmount
 * the value of gc_lnum recorded in the master node is out of date and cannot
 * be used. Instead, recovery must allocate an empty LEB for this purpose.
 * However, there may not be enough empty space, in which case it must be
 * possible to GC the dirtiest LEB into the GC head LEB.
 *
 * This function also runs the commit which causes the TNC updates from
 * size-recovery and orphans to be written to the flash. That is important to
 * ensure correct replay order for subsequent mounts.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_rcvry_gc_commit(struct ubifs_info *c)
{
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	struct ubifs_lprops lp;
	int err;

	dbg_rcvry("GC head LEB %d, offs %d", wbuf->lnum, wbuf->offs);

	c->gc_lnum = -1;
	if (wbuf->lnum == -1 || wbuf->offs == c->leb_size)
		return grab_empty_leb(c);

	err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
	if (err) {
		if (err != -ENOSPC)
			return err;

		dbg_rcvry("could not find a dirty LEB");
		return grab_empty_leb(c);
	}

	ubifs_assert(!(lp.flags & LPROPS_INDEX));
	ubifs_assert(lp.free + lp.dirty >= wbuf->offs);

	/*
	 * We run the commit before garbage collection otherwise subsequent
	 * mounts will see the GC and orphan deletion in a different order.
	 */
	dbg_rcvry("committing");
	err = ubifs_run_commit(c);
	if (err)
		return err;

	dbg_rcvry("GC'ing LEB %d", lp.lnum);
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	err = ubifs_garbage_collect_leb(c, &lp);
	if (err >= 0) {
		int err2 = ubifs_wbuf_sync_nolock(wbuf);

		if (err2)
			err = err2;
	}
	mutex_unlock(&wbuf->io_mutex);
	if (err < 0) {
		dbg_err("GC failed, error %d", err);
		if (err == -EAGAIN)
			err = -EINVAL;
		return err;
	}

	ubifs_assert(err == LEB_RETAINED);
	if (err != LEB_RETAINED)
		return -EINVAL;

	err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err)
		return err;

	dbg_rcvry("allocated LEB %d for GC", lp.lnum);
	return 0;
}

/**
 * struct size_entry - inode size information for recovery.
 * @rb: link in the RB-tree of sizes
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 * @inode: inode if pinned in memory awaiting rw mode to fix it
 */
struct size_entry {
	struct rb_node rb;
	ino_t inum;
	loff_t i_size;
	loff_t d_size;
	int exists;
	struct inode *inode;
};

/**
 * add_ino - add an entry to the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 */
static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size,
		   loff_t d_size, int exists)
{
	struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
	struct size_entry *e;

	while (*p) {
		parent = *p;
		e = rb_entry(parent, struct size_entry, rb);
		if (inum < e->inum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->inum = inum;
	e->i_size = i_size;
	e->d_size = d_size;
	e->exists = exists;

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, &c->size_tree);

	return 0;
}

/**
 * find_ino - find an entry on the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum)
{
	struct rb_node *p = c->size_tree.rb_node;
	struct size_entry *e;

	while (p) {
		e = rb_entry(p, struct size_entry, rb);
		if (inum < e->inum)
			p = p->rb_left;
		else if (inum > e->inum)
			p = p->rb_right;
		else
			return e;
	}
	return NULL;
}

/**
 * remove_ino - remove an entry from the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static void remove_ino(struct ubifs_info *c, ino_t inum)
{
	struct size_entry *e = find_ino(c, inum);

	if (!e)
		return;
	rb_erase(&e->rb, &c->size_tree);
	kfree(e);
}

/**
 * ubifs_destroy_size_tree - free resources related to the size tree.
 * @c: UBIFS file-system description object
 */
void ubifs_destroy_size_tree(struct ubifs_info *c)
{
	struct rb_node *this = c->size_tree.rb_node;
	struct size_entry *e;

	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		e = rb_entry(this, struct size_entry, rb);
		if (e->inode)
			iput(e->inode);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &e->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(e);
	}
	c->size_tree = RB_ROOT;
}

/**
 * ubifs_recover_size_accum - accumulate inode sizes for recovery.
 * @c: UBIFS file-system description object
 * @key: node key
 * @deletion: node is for a deletion
 * @new_size: inode size
 *
 * This function has two purposes:
 *     1) to ensure there are no data nodes that fall outside the inode size
 *     2) to ensure there are no data nodes for inodes that do not exist
 * To accomplish those purposes, an rb-tree is constructed containing an entry
 * for each inode number in the journal that has not been deleted, and recording
 * the size from the inode node, the maximum size of any data node (also altered
 * by truncations) and a flag indicating an inode number for which no inode node
 * was present in the journal.
 *
 * Note that there is still the possibility that there are data nodes that have
 * been committed that are beyond the inode size, however the only way to find
 * them would be to scan the entire index. Alternatively, some provision could
 * be made to record the size of inodes at the start of commit, which would seem
 * very cumbersome for a scenario that is quite unlikely and the only negative
 * consequence of which is wasted space.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
			     int deletion, loff_t new_size)
{
	ino_t inum = key_inum(c, key);
	struct size_entry *e;
	int err;

	switch (key_type(c, key)) {
	case UBIFS_INO_KEY:
		if (deletion)
			remove_ino(c, inum);
		else {
			e = find_ino(c, inum);
			if (e) {
				e->i_size = new_size;
				e->exists = 1;
			} else {
				err = add_ino(c, inum, new_size, 0, 1);
				if (err)
					return err;
			}
		}
		break;
	case UBIFS_DATA_KEY:
		e = find_ino(c, inum);
		if (e) {
			if (new_size > e->d_size)
				e->d_size = new_size;
		} else {
			err = add_ino(c, inum, 0, new_size, 0);
			if (err)
				return err;
		}
		break;
	case UBIFS_TRUN_KEY:
		e = find_ino(c, inum);
		if (e)
			e->d_size = new_size;
		break;
	}
	return 0;
}

/**
 * fix_size_in_place - fix inode size in place on flash.
 * @c: UBIFS file-system description object
 * @e: inode size information for recovery
 */
static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
{
	struct ubifs_ino_node *ino = c->sbuf;
	unsigned char *p;
	union ubifs_key key;
	int err, lnum, offs, len;
	loff_t i_size;
	uint32_t crc;

	/* Locate the inode node LEB number and offset */
	ino_key_init(c, &key, e->inum);
	err = ubifs_tnc_locate(c, &key, ino, &lnum, &offs);
	if (err)
		goto out;
	/*
	 * If the size recorded on the inode node is greater than the size that
	 * was calculated from nodes in the journal then don't change the inode.
	 */
	i_size = le64_to_cpu(ino->size);
	if (i_size >= e->d_size)
		return 0;
	/* Read the LEB */
	err = ubi_read(c->ubi, lnum, c->sbuf, 0, c->leb_size);
	if (err)
		goto out;
	/* Change the size field and recalculate the CRC */
	ino = c->sbuf + offs;
	ino->size = cpu_to_le64(e->d_size);
	len = le32_to_cpu(ino->ch.len);
	crc = crc32(UBIFS_CRC32_INIT, (void *)ino + 8, len - 8);
	ino->ch.crc = cpu_to_le32(crc);
	/* Work out where data in the LEB ends and free space begins */
	p = c->sbuf;
	len = c->leb_size - 1;
	while (p[len] == 0xff)
		len -= 1;
	len = ALIGN(len + 1, c->min_io_size);
	/* Atomically write the fixed LEB back again */
	err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
	if (err)
		goto out;
	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
	return 0;

out:
	ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
		   (unsigned long)e->inum, e->i_size, e->d_size, err);
	return err;
}

/**
 * ubifs_recover_size - recover inode size.
 * @c: UBIFS file-system description object
 *
 * This function attempts to fix inode size discrepancies identified by the
 * 'ubifs_recover_size_accum()' function.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size(struct ubifs_info *c)
{
	struct rb_node *this = rb_first(&c->size_tree);

	while (this) {
		struct size_entry *e;
		int err;

		e = rb_entry(this, struct size_entry, rb);
		if (!e->exists) {
			union ubifs_key key;

			ino_key_init(c, &key, e->inum);
			err = ubifs_tnc_lookup(c, &key, c->sbuf);
			if (err && err != -ENOENT)
				return err;
			if (err == -ENOENT) {
				/* Remove data nodes that have no inode */
				dbg_rcvry("removing ino %lu",
					  (unsigned long)e->inum);
				err = ubifs_tnc_remove_ino(c, e->inum);
				if (err)
					return err;
			} else {
				struct ubifs_ino_node *ino = c->sbuf;

				e->exists = 1;
				e->i_size = le64_to_cpu(ino->size);
			}
		}

		if (e->exists && e->i_size < e->d_size) {
			if (c->ro_mount) {
				/* Fix the inode size and pin it in memory */
				struct inode *inode;
				struct ubifs_inode *ui;

				ubifs_assert(!e->inode);

				inode = ubifs_iget(c->vfs_sb, e->inum);
				if (IS_ERR(inode))
					return PTR_ERR(inode);

				ui = ubifs_inode(inode);
				if (inode->i_size < e->d_size) {
					dbg_rcvry("ino %lu size %lld -> %lld",
						  (unsigned long)e->inum,
						  inode->i_size, e->d_size);
					inode->i_size = e->d_size;
					ui->ui_size = e->d_size;
					ui->synced_i_size = e->d_size;
					e->inode = inode;
					this = rb_next(this);
					continue;
				}
				iput(inode);
			} else {
				/* Fix the size in place */
				err = fix_size_in_place(c, e);
				if (err)
					return err;
				if (e->inode)
					iput(e->inode);
			}
		}

		this = rb_next(this);
		rb_erase(&e->rb, &c->size_tree);
		kfree(e);
	}

	return 0;
}