/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions needed to recover from unclean un-mounts.
 * When UBIFS is mounted, it checks a flag on the master node to determine if
 * an un-mount was completed successfully. If not, the process of mounting
 * incorporates additional checking and fixing of on-flash data structures.
 * UBIFS always cleans away all remnants of an unclean un-mount, so that
 * errors do not accumulate. However, UBIFS defers recovery if it is mounted
 * read-only, and the flash is not modified in that case.
 *
 * The general UBIFS approach to recovery is that it recovers from corruptions
 * which could be caused by power cuts, but it refuses to recover from
 * corruptions caused by anything else. UBIFS tries to distinguish between
 * these two causes of corruption, silently recovering in the former case and
 * loudly complaining in the latter case.
 *
 * UBIFS writes only to erased LEBs, so it writes only to flash space
 * containing only 0xFFs. UBIFS also always writes strictly from the beginning
 * of the LEB to the end. And UBIFS assumes that the underlying flash media
 * writes in @c->max_write_size bytes at a time.
 *
 * Hence, if UBIFS finds a corrupted node at offset X, it expects only the min.
 * I/O unit corresponding to offset X to contain corrupted data; all the
 * following min. I/O units have to contain empty space (all 0xFFs). If this is
 * not true, the corruption cannot be the result of a power cut, and UBIFS
 * refuses to mount.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * is_empty - determine whether a buffer is empty (contains all 0xff).
 * @buf: buffer to check
 * @len: length of buffer
 *
 * This function returns %1 if the buffer is empty (contains all 0xff),
 * otherwise %0 is returned.
 */
static int is_empty(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return 0;
	return 1;
}

/**
 * first_non_ff - find offset of the first non-0xff byte.
 * @buf: buffer to search in
 * @len: length of buffer
 *
 * This function returns offset of the first non-0xff byte in @buf or %-1 if
 * the buffer contains only 0xff bytes.
 */
static int first_non_ff(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return i;
	return -1;
}

/**
 * get_master_node - get the last valid master node allowing for corruption.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @pbuf: buffer containing the LEB read, is returned here
 * @mst: master node, if found, is returned here
 * @cor: corruption, if found, is returned here
 *
 * This function allocates a buffer, reads the LEB into it, and finds and
 * returns the last valid master node allowing for one area of corruption.
 * The corrupt area, if there is one, must be consistent with the assumption
 * that it is the result of an unclean unmount while the master node was being
 * written. Under those circumstances, it is valid to use the previously written
 * master node.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
			   struct ubifs_mst_node **mst, void **cor)
{
	const int sz = c->mst_node_alsz;
	int err, offs, len;
	void *sbuf, *buf;

	sbuf = vmalloc(c->leb_size);
	if (!sbuf)
		return -ENOMEM;

	err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0);
	if (err && err != -EBADMSG)
		goto out_free;

	/* Find the first position that is definitely not a node */
	offs = 0;
	buf = sbuf;
	len = c->leb_size;
	while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {
		struct ubifs_ch *ch = buf;

		if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
			break;
		offs += sz;
		buf  += sz;
		len  -= sz;
	}
	/* See if there was a valid master node before that */
	if (offs) {
		int ret;

		offs -= sz;
		buf  -= sz;
		len  += sz;
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret != SCANNED_A_NODE && offs) {
			/* Could have been corruption so check one place back */
			offs -= sz;
			buf  -= sz;
			len  += sz;
			ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
			if (ret != SCANNED_A_NODE)
				/*
				 * We accept only one area of corruption because
				 * we are assuming that it was caused while
				 * trying to write a master node.
				 */
				goto out_err;
		}
		if (ret == SCANNED_A_NODE) {
			struct ubifs_ch *ch = buf;

			if (ch->node_type != UBIFS_MST_NODE)
				goto out_err;
			dbg_rcvry("found a master node at %d:%d", lnum, offs);
			*mst = buf;
			offs += sz;
			buf  += sz;
			len  -= sz;
		}
	}
	/* Check for corruption */
	if (offs < c->leb_size) {
		if (!is_empty(buf, min_t(int, len, sz))) {
			*cor = buf;
			dbg_rcvry("found corruption at %d:%d", lnum, offs);
		}
		offs += sz;
		buf  += sz;
		len  -= sz;
	}
	/* Check remaining empty space */
	if (offs < c->leb_size)
		if (!is_empty(buf, len))
			goto out_err;
	*pbuf = sbuf;
	return 0;

out_err:
	err = -EINVAL;
out_free:
	vfree(sbuf);
	*mst = NULL;
	*cor = NULL;
	return err;
}

/**
 * write_rcvrd_mst_node - write recovered master node.
 * @c: UBIFS file-system description object
 * @mst: master node
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int write_rcvrd_mst_node(struct ubifs_info *c,
				struct ubifs_mst_node *mst)
{
	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
	__le32 save_flags;

	dbg_rcvry("recovery");

	save_flags = mst->flags;
	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);

	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
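	/* Write the prepared node to both master node LEBs (lnum and lnum + 1) */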
	err = ubifs_leb_change(c, lnum, mst, sz);
	if (err)
		goto out;
	err = ubifs_leb_change(c, lnum + 1, mst, sz);
	if (err)
		goto out;
out:
	mst->flags = save_flags;
	return err;
}

/**
 * ubifs_recover_master_node - recover the master node.
 * @c: UBIFS file-system description object
 *
 * This function recovers the master node from corruption that may occur due to
 * an unclean unmount.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_master_node(struct ubifs_info *c)
{
	void *buf1 = NULL, *buf2 = NULL, *cor1 = NULL, *cor2 = NULL;
	struct ubifs_mst_node *mst1 = NULL, *mst2 = NULL, *mst;
	const int sz = c->mst_node_alsz;
	int err, offs1, offs2;

	dbg_rcvry("recovery");

	err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1);
	if (err)
		goto out_free;

	err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2);
	if (err)
		goto out_free;

	if (mst1) {
		offs1 = (void *)mst1 - buf1;
		if ((le32_to_cpu(mst1->flags) & UBIFS_MST_RCVRY) &&
		    (offs1 == 0 && !cor1)) {
			/*
			 * mst1 was written by recovery at offset 0 with no
			 * corruption.
			 */
			dbg_rcvry("recovery recovery");
			mst = mst1;
		} else if (mst2) {
			offs2 = (void *)mst2 - buf2;
			if (offs1 == offs2) {
				/* Same offset, so must be the same */
				if (memcmp((void *)mst1 + UBIFS_CH_SZ,
					   (void *)mst2 + UBIFS_CH_SZ,
					   UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
					goto out_err;
				mst = mst1;
			} else if (offs2 + sz == offs1) {
				/* 1st LEB was written, 2nd was not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else if (offs1 == 0 &&
				   c->leb_size - offs2 - sz < sz) {
				/* 1st LEB was unmapped and written, 2nd not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else
				goto out_err;
		} else {
			/*
			 * 2nd LEB was unmapped and about to be written, so
			 * there must be only one master node in the first LEB
			 * and no corruption.
			 */
			if (offs1 != 0 || cor1)
				goto out_err;
			mst = mst1;
		}
	} else {
		if (!mst2)
			goto out_err;
		/*
		 * 1st LEB was unmapped and about to be written, so there must
		 * be no room left in 2nd LEB.
		 */
		offs2 = (void *)mst2 - buf2;
		if (offs2 + sz + sz <= c->leb_size)
			goto out_err;
		mst = mst2;
	}

	ubifs_msg("recovered master node from LEB %d",
		  (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));

	memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);

	if (c->ro_mount) {
		/* Read-only mode. Keep a copy for switching to rw mode */
		c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
		if (!c->rcvrd_mst_node) {
			err = -ENOMEM;
			goto out_free;
		}
		memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);

		/*
		 * We had to recover the master node, which means there was an
		 * unclean reboot. However, it is possible that the master node
		 * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
		 * E.g., consider the following chain of events:
		 *
		 * 1. UBIFS was cleanly unmounted, so the master node is clean
		 * 2. UBIFS is being mounted R/W and starts changing the master
		 *    node in the first (%UBIFS_MST_LNUM). A power cut happens,
		 *    so this LEB ends up with some amount of garbage at the
		 *    end.
		 * 3. UBIFS is being mounted R/O. We reach this place and
		 *    recover the master node from the second LEB
		 *    (%UBIFS_MST_LNUM + 1). But we cannot update the media
		 *    because we are being mounted R/O. We have to defer the
		 *    operation.
		 * 4. However, this master node (@c->mst_node) is marked as
		 *    clean (since the step 1). And if we just return, the
		 *    mount code will be confused and won't recover the master
		 *    node when it is re-mounted R/W later.
		 *
		 * Thus, we force the recovery by marking the master node as
		 * dirty.
		 */
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	} else {
		/* Write the recovered master node */
		c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
		err = write_rcvrd_mst_node(c, c->mst_node);
		if (err)
			goto out_free;
	}

	vfree(buf2);
	vfree(buf1);

	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err("failed to recover master node");
	if (mst1) {
		ubifs_err("dumping first master node");
		ubifs_dump_node(c, mst1);
	}
	if (mst2) {
		ubifs_err("dumping second master node");
		ubifs_dump_node(c, mst2);
	}
	vfree(buf2);
	vfree(buf1);
	return err;
}

/**
 * ubifs_write_rcvrd_mst_node - write the recovered master node.
 * @c: UBIFS file-system description object
 *
 * This function writes the master node that was recovered during mounting in
 * read-only mode and must now be written because we are remounting rw.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c)
{
	int err;

	if (!c->rcvrd_mst_node)
		return 0;
	c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	err = write_rcvrd_mst_node(c, c->rcvrd_mst_node);
	if (err)
		return err;
	kfree(c->rcvrd_mst_node);
	c->rcvrd_mst_node = NULL;
	return 0;
}

/**
 * is_last_write - determine if an offset was in the last write to a LEB.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @offs: offset to check
 *
 * This function returns %1 if @offs was in the last write to the LEB whose data
 * is in @buf, otherwise %0 is returned. The determination is made by checking
 * for subsequent empty space starting from the next @c->max_write_size
 * boundary.
 */
static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
{
	int empty_offs, check_len;
	uint8_t *p;

	/*
	 * Round up to the next @c->max_write_size boundary i.e. @offs is in
	 * the last wbuf written. After that should be empty space.
	 */
	empty_offs = ALIGN(offs + 1, c->max_write_size);
	check_len = c->leb_size - empty_offs;
	p = buf + empty_offs - offs;
	return is_empty(p, check_len);
}

/**
 * clean_buf - clean the data from an LEB sitting in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to clean
 * @lnum: LEB number to clean
 * @offs: offset from which to clean
 * @len: length of buffer
 *
 * This function pads up to the next min_io_size boundary (if there is one) and
 * sets empty space to all 0xff. @buf, @offs and @len are updated to the next
 * @c->min_io_size boundary.
 */
static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
		      int *offs, int *len)
{
	int empty_offs, pad_len;

	lnum = lnum;
	dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);

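	/*
	 * Pad up to the next min. I/O unit boundary, then fill the rest of
	 * the LEB with 0xFFs.
	 */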
	ubifs_assert(!(*offs & 7));
	empty_offs = ALIGN(*offs, c->min_io_size);
	pad_len = empty_offs - *offs;
	ubifs_pad(c, *buf, pad_len);
	*offs += pad_len;
	*buf += pad_len;
	*len -= pad_len;
	memset(*buf, 0xff, c->leb_size - empty_offs);
}

/**
 * no_more_nodes - determine if there are no more nodes in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @len: length of buffer
 * @lnum: LEB number of the LEB from which @buf was read
 * @offs: offset from which @buf was read
 *
 * This function ensures that the corrupted node at @offs is the last thing
 * written to a LEB. This function returns %1 if more data is not found and
 * %0 if more data is found.
 */
static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
			 int lnum, int offs)
{
	struct ubifs_ch *ch = buf;
	int skip, dlen = le32_to_cpu(ch->len);

	/* Check for empty space after the corrupt node's common header */
	skip = ALIGN(offs + UBIFS_CH_SZ, c->max_write_size) - offs;
	if (is_empty(buf + skip, len - skip))
		return 1;
	/*
	 * The area after the common header size is not empty, so the common
	 * header must be intact. Check it.
	 */
	if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) {
		dbg_rcvry("unexpected bad common header at %d:%d", lnum, offs);
		return 0;
	}
	/* Now we know the corrupt node's length we can skip over it */
	skip = ALIGN(offs + dlen, c->max_write_size) - offs;
	/* After which there should be empty space */
	if (is_empty(buf + skip, len - skip))
		return 1;
	dbg_rcvry("unexpected data at %d:%d", lnum, offs + skip);
	return 0;
}

/**
 * fix_unclean_leb - fix an unclean LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB information
 * @start: offset where scan started
 */
static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
			   int start)
{
	int lnum = sleb->lnum, endpt = start;

	/* Get the end offset of the last node we are keeping */
	if (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;

		snod = list_entry(sleb->nodes.prev,
				  struct ubifs_scan_node, list);
		endpt = snod->offs + snod->len;
	}

	if (c->ro_mount && !c->remounting_rw) {
		/* Add to recovery list */
		struct ubifs_unclean_leb *ucleb;

		dbg_rcvry("need to fix LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		ucleb = kzalloc(sizeof(struct ubifs_unclean_leb), GFP_NOFS);
		if (!ucleb)
			return -ENOMEM;
		ucleb->lnum = lnum;
		ucleb->endpt = endpt;
		list_add_tail(&ucleb->list, &c->unclean_leb_list);
	} else {
		/* Write the fixed LEB back to flash */
		int err;

		dbg_rcvry("fixing LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		if (endpt == 0) {
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		} else {
			int len = ALIGN(endpt, c->min_io_size);

			if (start) {
				err = ubifs_leb_read(c, lnum, sleb->buf, 0,
						     start, 1);
				if (err)
					return err;
			}
			/* Pad to min_io_size */
			if (len > endpt) {
				int pad_len = len - ALIGN(endpt, 8);

				if (pad_len > 0) {
					void *buf = sleb->buf + len - pad_len;

					ubifs_pad(c, buf, pad_len);
				}
			}
			err = ubifs_leb_change(c, lnum, sleb->buf, len);
			if (err)
				return err;
		}
	}
	return 0;
}

/**
 * drop_last_group - drop the last group of nodes.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * group of nodes of the scanned LEB.
 */
static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
{
	while (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;
		struct ubifs_ch *ch;

		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);
		ch = snod->node;
		if (ch->group_type != UBIFS_IN_NODE_GROUP)
			break;

		dbg_rcvry("dropping grouped node at %d:%d",
			  sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
	}
}

/**
 * drop_last_node - drop the last node.
 * @sleb: scanned LEB information
 * @offs: offset of dropped node is returned here
 *
 * This is a helper function for 'ubifs_recover_leb()' which drops the last
 * node of the scanned LEB.
 */
static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
{
	struct ubifs_scan_node *snod;

	if (!list_empty(&sleb->nodes)) {
		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);

		dbg_rcvry("dropping last node at %d:%d",
			  sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
	}
}

/**
 * ubifs_recover_leb - scan and recover a LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
 *         belong to any journal head)
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by the unclean unmount from which we are attempting to recover.
 * Returns the scanned information in case of success, an error pointer with
 * %-EUCLEAN if an unrecoverable corruption is found, or another negative error
 * code in case of failure.
 */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
					 int offs, void *sbuf, int jhead)
{
	int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
	int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
	struct ubifs_scan_leb *sleb;
	void *buf = sbuf + offs;

	dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);

	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
	if (IS_ERR(sleb))
		return sleb;

	ubifs_assert(len >= 8);
	while (len >= 8) {
		dbg_scan("look at LEB %d:%d (%d bytes left)",
			 lnum, offs, len);

		cond_resched();

		/*
		 * Scan quietly until there is an error from which we cannot
		 * recover
		 */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			err = ubifs_add_snod(c, sleb, buf, offs);
			if (err)
				goto error;
			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
		} else if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
		} else if (ret == SCANNED_EMPTY_SPACE ||
			   ret == SCANNED_GARBAGE ||
			   ret == SCANNED_A_BAD_PAD_NODE ||
			   ret == SCANNED_A_CORRUPT_NODE) {
			dbg_rcvry("found corruption (%d) at %d:%d",
				  ret, lnum, offs);
			break;
		} else {
			ubifs_err("unexpected return value %d", ret);
			err = -EINVAL;
			goto error;
		}
	}

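	/*
	 * Check that whatever stopped the scan is consistent with a power
	 * cut: any corruption must be confined to the last write to this LEB
	 * (see the top-of-file comment).
	 */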
	if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
		if (!is_last_write(c, buf, offs))
			goto corrupted_rescan;
	} else if (ret == SCANNED_A_CORRUPT_NODE) {
		if (!no_more_nodes(c, buf, len, lnum, offs))
			goto corrupted_rescan;
	} else if (!is_empty(buf, len)) {
		if (!is_last_write(c, buf, offs)) {
			int corruption = first_non_ff(buf, len);

			/*
			 * See header comment for this file for more
			 * explanations about the reasons we have this check.
			 */
			ubifs_err("corrupt empty space LEB %d:%d, corruption "
				  "starts at %d", lnum, offs, corruption);
			/* Make sure we dump interesting non-0xFF data */
			offs += corruption;
			buf += corruption;
			goto corrupted;
		}
	}

	min_io_unit = round_down(offs, c->min_io_size);
	if (grouped)
		/*
		 * If nodes are grouped, always drop the incomplete group at
		 * the end.
		 */
		drop_last_group(sleb, &offs);

	if (jhead == GCHD) {
		/*
		 * If this LEB belongs to the GC head then while we are in the
		 * middle of the same min. I/O unit keep dropping nodes. So
		 * basically, what we want is to make sure that the last min.
		 * I/O unit where we saw the corruption is dropped completely
		 * with all the uncorrupted nodes which may possibly sit there.
		 *
		 * In other words, let's name the min. I/O unit where the
		 * corruption starts B, and the previous min. I/O unit A. The
		 * below code tries to deal with a situation when half of B
		 * contains valid nodes or the end of a valid node, and the
		 * second half of B contains corrupted data or garbage. This
		 * means that UBIFS had been writing to B just before the power
		 * cut happened. I do not know how realistic a scenario it is
		 * that half of the min. I/O unit had been written successfully
		 * and the other half not, but this is possible in our 'failure
		 * mode emulation' infrastructure at least.
		 *
		 * So what is the problem, why do we need to drop those nodes?
		 * Why can't we just clean-up the second half of B by putting a
		 * padding node there? We can, and this works fine with one
		 * exception which was reproduced with power cut emulation
		 * testing and happens extremely rarely.
		 *
		 * Imagine the file-system is full, we run GC which starts
		 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
		 * the current GC head LEB). The @c->gc_lnum is -1, which means
		 * that GC will retain LEB X and will try to continue. Imagine
		 * that LEB X is currently the dirtiest LEB, and the amount of
		 * used space in LEB Y is exactly the same as amount of free
		 * space in LEB X.
		 *
		 * And a power cut happens when nodes are moved from LEB X to
		 * LEB Y. We are here trying to recover LEB Y which is the GC
		 * head LEB. We find the min. I/O unit B as described above.
		 * Then we clean-up LEB Y by padding min. I/O unit B. And later
		 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
		 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
		 * does not match because the amount of valid nodes there does
		 * not fit the free space in LEB Y any more! And this is
		 * because of the padding node which we added to LEB Y. The
		 * user-visible effect of this which I once observed and
		 * analysed is that we cannot mount the file-system with
		 * -ENOSPC error.
		 *
		 * So obviously, to make sure that situation does not happen we
		 * should free min. I/O unit B in LEB Y completely and the last
		 * used min. I/O unit in LEB Y should be A. This is basically
		 * what the below code tries to do.
		 */
		while (offs > min_io_unit)
			drop_last_node(sleb, &offs);
	}

	buf = sbuf + offs;
	len = c->leb_size - offs;

	clean_buf(c, &buf, lnum, &offs, &len);
	ubifs_end_scan(c, sleb, lnum, offs);

	err = fix_unclean_leb(c, sleb, start);
	if (err)
		goto error;

	return sleb;

corrupted_rescan:
	/* Re-scan the corrupted data with verbose messages */
	ubifs_err("corruption %d", ret);
	ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
corrupted:
	ubifs_scanned_corruption(c, lnum, offs, buf);
	err = -EUCLEAN;
error:
	ubifs_err("LEB %d scanning failed", lnum);
	ubifs_scan_destroy(sleb);
	return ERR_PTR(err);
}

/**
 * get_cs_sqnum - get commit start sequence number.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of commit start node
 * @offs: offset of commit start node
 * @cs_sqnum: commit start sequence number is returned here
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
			unsigned long long *cs_sqnum)
{
	struct ubifs_cs_node *cs_node = NULL;
	int err, ret;

	dbg_rcvry("at %d:%d", lnum, offs);
	cs_node = kmalloc(UBIFS_CS_NODE_SZ, GFP_KERNEL);
	if (!cs_node)
		return -ENOMEM;
	if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
		goto out_err;
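	/* Read the node and check it really is a CS node of the current commit */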
	err = ubifs_leb_read(c, lnum, (void *)cs_node, offs,
			     UBIFS_CS_NODE_SZ, 0);
	if (err && err != -EBADMSG)
		goto out_free;
	ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
	if (ret != SCANNED_A_NODE) {
		ubifs_err("Not a valid node");
		goto out_err;
	}
	if (cs_node->ch.node_type != UBIFS_CS_NODE) {
		ubifs_err("Not a CS node, type is %d", cs_node->ch.node_type);
		goto out_err;
	}
	if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
		ubifs_err("CS node cmt_no %llu != current cmt_no %llu",
			  (unsigned long long)le64_to_cpu(cs_node->cmt_no),
			  c->cmt_no);
		goto out_err;
	}
	*cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
	dbg_rcvry("commit start sqnum %llu", *cs_sqnum);
	kfree(cs_node);
	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err("failed to get CS sqnum");
	kfree(cs_node);
	return err;
}

/**
 * ubifs_recover_log_leb - scan and recover a log LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by unclean reboots from which we are attempting to recover
 * (assume that only the last log LEB can be corrupted by an unclean reboot).
 *
 * This function returns the scanned information on success and an error
 * pointer on failure.
 */
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
					     int offs, void *sbuf)
{
	struct ubifs_scan_leb *sleb;
	int next_lnum;

	dbg_rcvry("LEB %d", lnum);
	next_lnum = lnum + 1;
	if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs)
		next_lnum = UBIFS_LOG_LNUM;
	if (next_lnum != c->ltail_lnum) {
		/*
		 * We can only recover at the end of the log, so check that the
		 * next log LEB is empty or out of date.
		 */
		sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0);
		if (IS_ERR(sleb))
			return sleb;
		if (sleb->nodes_cnt) {
			struct ubifs_scan_node *snod;
			unsigned long long cs_sqnum = c->cs_sqnum;

			snod = list_entry(sleb->nodes.next,
					  struct ubifs_scan_node, list);
			if (cs_sqnum == 0) {
				int err;

				err = get_cs_sqnum(c, lnum, offs, &cs_sqnum);
				if (err) {
					ubifs_scan_destroy(sleb);
					return ERR_PTR(err);
				}
			}
			if (snod->sqnum > cs_sqnum) {
				ubifs_err("unrecoverable log corruption "
					  "in LEB %d", lnum);
				ubifs_scan_destroy(sleb);
				return ERR_PTR(-EUCLEAN);
			}
		}
		ubifs_scan_destroy(sleb);
	}
	return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
}

/**
 * recover_head - recover a head.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of head to recover
 * @offs: offset of head to recover
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at a head location.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf)
{
	int len = c->max_write_size, err;

	if (offs + len > c->leb_size)
		len = c->leb_size - offs;

	if (!len)
		return 0;

	/* Read at the head location and check it is empty flash */
	err = ubifs_leb_read(c, lnum, sbuf, offs, len, 1);
	if (err || !is_empty(sbuf, len)) {
		dbg_rcvry("cleaning head at %d:%d", lnum, offs);
		if (offs == 0)
			return ubifs_leb_unmap(c, lnum);
		err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1);
		if (err)
			return err;
		return ubifs_leb_change(c, lnum, sbuf, offs);
	}

	return 0;
}

/**
 * ubifs_recover_inl_heads - recover index and LPT heads.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at the index and
 * LPT head locations.
 *
 * This deals with the recovery of a half-completed journal commit. UBIFS is
 * careful never to overwrite the last version of the index or the LPT. Because
 * the index and LPT are wandering trees, data from a half-completed commit will
 * not be referenced anywhere in UBIFS. The data will be either in LEBs that are
 * assumed to be empty and will be unmapped anyway before use, or in the index
 * and LPT heads.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
{
	int err;

	ubifs_assert(!c->ro_mount || c->remounting_rw);

	dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
	err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
	if (err)
		return err;

	dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
	err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
	if (err)
		return err;

	return 0;
}

/**
 * clean_an_unclean_leb - read and write a LEB to remove corruption.
 * @c: UBIFS file-system description object
 * @ucleb: unclean LEB information
 * @sbuf: LEB-sized buffer to use
 *
 * This function reads a LEB up to a point pre-determined by the mount recovery,
 * checks the nodes, and writes the result back to the flash, thereby cleaning
 * off any following corruption, or non-fatal ECC errors.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int clean_an_unclean_leb(struct ubifs_info *c,
				struct ubifs_unclean_leb *ucleb, void *sbuf)
{
	int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
	void *buf = sbuf;

	dbg_rcvry("LEB %d len %d", lnum, len);

	if (len == 0) {
		/* Nothing to read, just unmap it */
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
		return 0;
	}

	err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
	if (err && err != -EBADMSG)
		return err;

	while (len >= 8) {
		int ret;

		cond_resched();

		/* Scan quietly until there is an error */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);

		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
			continue;
		}

		if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
			continue;
		}

		if (ret == SCANNED_EMPTY_SPACE) {
			ubifs_err("unexpected empty space at %d:%d",
				  lnum, offs);
			return -EUCLEAN;
		}

		if (quiet) {
			/* Redo the last scan but noisily */
			quiet = 0;
			continue;
		}

		ubifs_scanned_corruption(c, lnum, offs, buf);
		return -EUCLEAN;
	}

	/* Pad to min_io_size */
	len = ALIGN(ucleb->endpt, c->min_io_size);
	if (len > ucleb->endpt) {
		int pad_len = len - ALIGN(ucleb->endpt, 8);

		if (pad_len > 0) {
			buf = c->sbuf + len - pad_len;
			ubifs_pad(c, buf, pad_len);
		}
	}

	/* Write back the LEB atomically */
	err = ubifs_leb_change(c, lnum, sbuf, len);
	if (err)
		return err;

	dbg_rcvry("cleaned LEB %d", lnum);

	return 0;
}

/**
 * ubifs_clean_lebs - clean LEBs recovered during read-only mount.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function cleans a LEB identified during recovery that needs to be
 * written but was not because UBIFS was mounted read-only. This happens when
 * remounting to read-write mode.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf)
{
	dbg_rcvry("recovery");
	while (!list_empty(&c->unclean_leb_list)) {
		struct ubifs_unclean_leb *ucleb;
		int err;

		ucleb = list_entry(c->unclean_leb_list.next,
				   struct ubifs_unclean_leb, list);
		err = clean_an_unclean_leb(c, ucleb, sbuf);
		if (err)
			return err;
		list_del(&ucleb->list);
		kfree(ucleb);
	}
	return 0;
}

/**
 * grab_empty_leb - grab an empty LEB to use as GC LEB and run commit.
 * @c: UBIFS file-system description object
 *
 * This is a helper function for 'ubifs_rcvry_gc_commit()' which grabs an empty
 * LEB to be used as GC LEB (@c->gc_lnum), and then runs the commit. Returns
 * zero in case of success and a negative error code in case of failure.
 */
static int grab_empty_leb(struct ubifs_info *c)
{
	int lnum, err;

	/*
	 * Note, it is very important to first search for an empty LEB and then
	 * run the commit, not vice-versa. The reason is that there might be
	 * only one empty LEB at the moment, the one which has been the
	 * @c->gc_lnum just before the power cut happened. During the regular
	 * UBIFS operation (not now) @c->gc_lnum is marked as "taken", so no
	 * one but GC can grab it. But at this moment this single empty LEB is
	 * not marked as taken, so if we run commit - what happens? Right, the
	 * commit will grab it and write the index there. Remember that the
	 * index always expands as long as there is free space, and it only
	 * starts consolidating when we run out of space.
	 *
	 * IOW, if we run commit now, we might not be able to find a free LEB
	 * after this.
	 */
	lnum = ubifs_find_free_leb_for_idx(c);
	if (lnum < 0) {
		ubifs_err("could not find an empty LEB");
		ubifs_dump_lprops(c);
		ubifs_dump_budg(c, &c->bi);
		return lnum;
	}

	/* Reset the index flag */
	err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
				  LPROPS_INDEX, 0);
	if (err)
		return err;

	c->gc_lnum = lnum;
	dbg_rcvry("found empty LEB %d, run commit", lnum);

	return ubifs_run_commit(c);
}

/**
 * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit.
 * @c: UBIFS file-system description object
 *
 * Out-of-place garbage collection always requires one empty LEB with which to
 * start garbage collection. The LEB number is recorded in c->gc_lnum and is
 * written to the master node on unmounting. In the case of an unclean unmount
 * the value of gc_lnum recorded in the master node is out of date and cannot
 * be used. Instead, recovery must allocate an empty LEB for this purpose.
 * However, there may not be enough empty space, in which case it must be
 * possible to GC the dirtiest LEB into the GC head LEB.
 *
 * This function also runs the commit which causes the TNC updates from
 * size-recovery and orphans to be written to the flash. That is important to
 * ensure correct replay order for subsequent mounts.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_rcvry_gc_commit(struct ubifs_info *c)
{
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	struct ubifs_lprops lp;
	int err;

	dbg_rcvry("GC head LEB %d, offs %d", wbuf->lnum, wbuf->offs);

	c->gc_lnum = -1;
	if (wbuf->lnum == -1 || wbuf->offs == c->leb_size)
		return grab_empty_leb(c);

	err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
	if (err) {
		if (err != -ENOSPC)
			return err;

		dbg_rcvry("could not find a dirty LEB");
		return grab_empty_leb(c);
	}

	ubifs_assert(!(lp.flags & LPROPS_INDEX));
	ubifs_assert(lp.free + lp.dirty >= wbuf->offs);

	/*
	 * We run the commit before garbage collection otherwise subsequent
	 * mounts will see the GC and orphan deletion in a different order.
	 */
	dbg_rcvry("committing");
	err = ubifs_run_commit(c);
	if (err)
		return err;

	dbg_rcvry("GC'ing LEB %d", lp.lnum);
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	err = ubifs_garbage_collect_leb(c, &lp);
	if (err >= 0) {
		int err2 = ubifs_wbuf_sync_nolock(wbuf);

		if (err2)
			err = err2;
	}
	mutex_unlock(&wbuf->io_mutex);
	if (err < 0) {
		ubifs_err("GC failed, error %d", err);
		if (err == -EAGAIN)
			err = -EINVAL;
		return err;
	}

	ubifs_assert(err == LEB_RETAINED);
	if (err != LEB_RETAINED)
		return -EINVAL;

	err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err)
		return err;

	dbg_rcvry("allocated LEB %d for GC", lp.lnum);
	return 0;
}

/**
 * struct size_entry - inode size information for recovery.
 * @rb: link in the RB-tree of sizes
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 * @inode: inode if pinned in memory awaiting rw mode to fix it
 */
struct size_entry {
	struct rb_node rb;
	ino_t inum;
	loff_t i_size;
	loff_t d_size;
	int exists;
	struct inode *inode;
};

/**
 * add_ino - add an entry to the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 */
static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size,
		   loff_t d_size, int exists)
{
	struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
	struct size_entry *e;

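	/* Walk down the RB-tree, ordered by inode number, to the insertion point */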
	while (*p) {
		parent = *p;
		e = rb_entry(parent, struct size_entry, rb);
		if (inum < e->inum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->inum = inum;
	e->i_size = i_size;
	e->d_size = d_size;
	e->exists = exists;

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, &c->size_tree);

	return 0;
}

/**
 * find_ino - find an entry on the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum)
{
	struct rb_node *p = c->size_tree.rb_node;
	struct size_entry *e;

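	/* Binary search down the RB-tree, which is ordered by inode number */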
	while (p) {
		e = rb_entry(p, struct size_entry, rb);
		if (inum < e->inum)
			p = p->rb_left;
		else if (inum > e->inum)
			p = p->rb_right;
		else
			return e;
	}
	return NULL;
}

/**
 * remove_ino - remove an entry from the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static void remove_ino(struct ubifs_info *c, ino_t inum)
{
	struct size_entry *e = find_ino(c, inum);

	if (!e)
		return;
	rb_erase(&e->rb, &c->size_tree);
	kfree(e);
}

/**
 * ubifs_destroy_size_tree - free resources related to the size tree.
 * @c: UBIFS file-system description object
 */
void ubifs_destroy_size_tree(struct ubifs_info *c)
{
	struct rb_node *this = c->size_tree.rb_node;
	struct size_entry *e;

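	/*
	 * Free the entries bottom-up: descend to a leaf, drop any pinned
	 * inode, free the entry, detach it from its parent, and continue
	 * from the parent.
	 */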
	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		e = rb_entry(this, struct size_entry, rb);
		if (e->inode)
			iput(e->inode);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &e->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(e);
	}
	c->size_tree = RB_ROOT;
}

/**
 * ubifs_recover_size_accum - accumulate inode sizes for recovery.
 * @c: UBIFS file-system description object
 * @key: node key
 * @deletion: node is for a deletion
 * @new_size: inode size
 *
 * This function has two purposes:
 *     1) to ensure there are no data nodes that fall outside the inode size
 *     2) to ensure there are no data nodes for inodes that do not exist
 * To accomplish those purposes, an rb-tree is constructed containing an entry
 * for each inode number in the journal that has not been deleted, and recording
 * the size from the inode node, the maximum size of any data node (also altered
 * by truncations) and a flag indicating an inode number for which no inode node
 * was present in the journal.
 *
 * Note that there is still the possibility that there are data nodes that have
 * been committed that are beyond the inode size, however the only way to find
 * them would be to scan the entire index. Alternatively, some provision could
 * be made to record the size of inodes at the start of commit, which would seem
 * very cumbersome for a scenario that is quite unlikely and the only negative
 * consequence of which is wasted space.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
			     int deletion, loff_t new_size)
{
	ino_t inum = key_inum(c, key);
	struct size_entry *e;
	int err;

	switch (key_type(c, key)) {
	case UBIFS_INO_KEY:
		if (deletion)
			remove_ino(c, inum);
		else {
			e = find_ino(c, inum);
			if (e) {
				e->i_size = new_size;
				e->exists = 1;
			} else {
				err = add_ino(c, inum, new_size, 0, 1);
				if (err)
					return err;
			}
		}
		break;
	case UBIFS_DATA_KEY:
		e = find_ino(c, inum);
		if (e) {
			if (new_size > e->d_size)
				e->d_size = new_size;
		} else {
			err = add_ino(c, inum, 0, new_size, 0);
			if (err)
				return err;
		}
		break;
	case UBIFS_TRUN_KEY:
		e = find_ino(c, inum);
		if (e)
			e->d_size = new_size;
		break;
	}
	return 0;
}

/**
 * fix_size_in_place - fix inode size in place on flash.
 * @c: UBIFS file-system description object
 * @e: inode size information for recovery
 */
static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
{
	struct ubifs_ino_node *ino = c->sbuf;
	unsigned char *p;
	union ubifs_key key;
	int err, lnum, offs, len;
	loff_t i_size;
	uint32_t crc;

	/* Locate the inode node LEB number and offset */
	ino_key_init(c, &key, e->inum);
	err = ubifs_tnc_locate(c, &key, ino, &lnum, &offs);
	if (err)
		goto out;
	/*
	 * If the size recorded on the inode node is greater than the size that
	 * was calculated from nodes in the journal then don't change the inode.
	 */
	i_size = le64_to_cpu(ino->size);
	if (i_size >= e->d_size)
		return 0;
	/* Read the LEB */
	err = ubifs_leb_read(c, lnum, c->sbuf, 0, c->leb_size, 1);
	if (err)
		goto out;
	/* Change the size field and recalculate the CRC */
	ino = c->sbuf + offs;
	ino->size = cpu_to_le64(e->d_size);
	len = le32_to_cpu(ino->ch.len);
	crc = crc32(UBIFS_CRC32_INIT, (void *)ino + 8, len - 8);
	ino->ch.crc = cpu_to_le32(crc);
	/* Work out where data in the LEB ends and free space begins */
	p = c->sbuf;
	len = c->leb_size - 1;
	while (p[len] == 0xff)
		len -= 1;
	len = ALIGN(len + 1, c->min_io_size);
	/* Atomically write the fixed LEB back again */
	err = ubifs_leb_change(c, lnum, c->sbuf, len);
	if (err)
		goto out;
	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
	return 0;

out:
	ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
		   (unsigned long)e->inum, e->i_size, e->d_size, err);
	return err;
}

/**
 * ubifs_recover_size - recover inode size.
 * @c: UBIFS file-system description object
 *
 * This function attempts to fix inode size discrepancies identified by the
 * 'ubifs_recover_size_accum()' function.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size(struct ubifs_info *c)
{
	struct rb_node *this = rb_first(&c->size_tree);

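	/* Walk the size tree in inode number order and fix up each entry */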
	while (this) {
		struct size_entry *e;
		int err;

		e = rb_entry(this, struct size_entry, rb);
		if (!e->exists) {
			union ubifs_key key;

			ino_key_init(c, &key, e->inum);
			err = ubifs_tnc_lookup(c, &key, c->sbuf);
			if (err && err != -ENOENT)
				return err;
			if (err == -ENOENT) {
				/* Remove data nodes that have no inode */
				dbg_rcvry("removing ino %lu",
					  (unsigned long)e->inum);
				err = ubifs_tnc_remove_ino(c, e->inum);
				if (err)
					return err;
			} else {
				struct ubifs_ino_node *ino = c->sbuf;

				e->exists = 1;
				e->i_size = le64_to_cpu(ino->size);
			}
		}

		if (e->exists && e->i_size < e->d_size) {
			if (c->ro_mount) {
				/* Fix the inode size and pin it in memory */
				struct inode *inode;
				struct ubifs_inode *ui;

				ubifs_assert(!e->inode);

				inode = ubifs_iget(c->vfs_sb, e->inum);
				if (IS_ERR(inode))
					return PTR_ERR(inode);

				ui = ubifs_inode(inode);
				if (inode->i_size < e->d_size) {
					dbg_rcvry("ino %lu size %lld -> %lld",
						  (unsigned long)e->inum,
						  inode->i_size, e->d_size);
					inode->i_size = e->d_size;
					ui->ui_size = e->d_size;
					ui->synced_i_size = e->d_size;
					e->inode = inode;
					this = rb_next(this);
					continue;
				}
				iput(inode);
			} else {
				/* Fix the size in place */
				err = fix_size_in_place(c, e);
				if (err)
					return err;
				if (e->inode)
					iput(e->inode);
			}
		}

		this = rb_next(this);
		rb_erase(&e->rb, &c->size_tree);
		kfree(e);
	}

	return 0;
}