/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * init_seen - allocate memory for the seen logic, used for debugging.
 * @ubi: UBI device description object
 */
static inline int *init_seen(struct ubi_device *ubi)
{
	int *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(int *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	seen[pnum] = 1;
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, int *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}
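
/*
 * Typical use of the seen logic while writing a fastmap (a rough
 * sketch; see ubi_write_fastmap() for the real flow):
 *
 *	int *seen = init_seen(ubi);
 *	...
 *	set_seen(ubi, pnum, seen);	(for every PEB the fastmap records)
 *	...
 *	err = self_check_seen(ubi, seen);
 *	free_seen(seen);
 *
 * If the fastmap self-check is disabled, init_seen() returns NULL and
 * set_seen()/self_check_seen() degrade to no-ops.
 */

/*
 * On flash a fastmap is a flat byte stream, rounded up to whole LEBs:
 * a super block (struct ubi_fm_sb), a header (struct ubi_fm_hdr), two
 * scan pools, one struct ubi_fm_ec per PEB, an EBA table (struct
 * ubi_fm_eba plus one __be32 per PEB) and up to UBI_MAX_VOLUMES volume
 * headers. ubi_calc_fm_size() below computes the worst-case size of
 * this stream.
 */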

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}
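
/*
 * Both internal fastmap volumes carry UBI_COMPAT_DELETE, so an UBI
 * implementation without fastmap support deletes these internal
 * volumes at attach time and keeps using the device, rather than
 * refusing to attach it.
 */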

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}
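
/*
 * Note: besides queueing the new entry, add_aeb() also feeds the erase
 * counter statistics (@ai->ec_sum, @ai->ec_count, @ai->min_ec,
 * @ai->max_ec); ubi_attach_fastmap() later derives the mean erase
 * counter from these as ec_sum / ec_count.
 */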

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}
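
/*
 * Volumes found by fastmap live in @ai->volumes, an rb-tree sorted by
 * volume ID. add_vol() first walks the tree down to a leaf and then
 * splices the new node in via rb_link_node()/rb_insert_color(); the
 * per-volume rb-tree of erase blocks (@av->root) starts out empty.
 */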

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
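
/*
 * When update_vol() finds an entry with the same LEB number, one of
 * three things happens: the PEB numbers match too and the duplicate is
 * simply dropped; ubi_compare_lebs() declares @new_aeb the newer copy,
 * so the old PEB is scheduled for erasure and the tree entry is
 * rewritten in place; or @new_aeb is the older copy and is itself
 * scheduled for erasure. Only a LEB with an unknown number is actually
 * inserted into @av->root.
 */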

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}
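
/*
 * unmap_peb() exists because a PEB that reads as empty in a pool may
 * still be referenced by the EBA tables taken from the fastmap: the
 * LEB was unmapped after the fastmap had been written. The stale
 * reference has to be dropped before the PEB can be treated as free.
 */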

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}
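
/*
 * For each pool member scan_pool() thus decides between three cases:
 * an empty VID area (UBI_IO_FF*) means the PEB became free after the
 * fastmap was written, so any stale EBA reference is dropped and the
 * PEB goes onto @free; a readable VID header means the PEB holds data
 * written after the fastmap, which process_pool_aeb() folds into the
 * attach info; anything else makes the whole fastmap untrusted and
 * forces a full scan (UBI_BAD_FASTMAP).
 */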

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}
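
/*
 * Attaching by fastmap is a two-stage pipeline: ubi_scan_fastmap()
 * below locates the fastmap on flash, reads it into @ubi->fm_buf and
 * verifies the data CRC, while ubi_attach_fastmap() above parses that
 * buffer into a struct ubi_attach_info as if a full scan had produced
 * it.
 */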

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	int *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
		list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			/* mark protection queue PEBs as seen as well */
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			used_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}
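
/*
 * Note the CRC discipline in ubi_write_fastmap(): the in-memory image
 * is built with data_crc set to zero, the CRC over the whole
 * @ubi->fm_size buffer is computed afterwards and stored in the super
 * block. ubi_scan_fastmap() replays exactly this, zeroing data_crc
 * again before recomputing the checksum.
 */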

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}
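
/*
 * ubi_io_sync_erase() returns the number of erase operations actually
 * performed on success, which is why erase_block() adds its return
 * value to the old erase counter before writing the EC header back.
 */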

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue, or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}
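
/*
 * ubi_update_fastmap() below proceeds in four steps: refill the
 * fastmap pools, collect one PEB per fastmap LEB (reusing and
 * re-erasing blocks of the old fastmap where possible), write the new
 * fastmap under ubi->work_sem and ubi->fm_eba_sem, and finally release
 * the PEBs of the old layout. If anything goes wrong the current
 * fastmap is invalidated so that the next attach falls back to a full
 * scan.
 */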

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}