/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

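/*
 * A fastmap is a checkpoint of the UBI state: the erase counters of all
 * free, used, scrubbed and to-be-erased PEBs, the two PEB pools and the
 * per-volume EBA tables. It is anchored in a super block PEB which points
 * to the data PEBs holding the payload, so that attaching only requires
 * scanning a handful of PEBs instead of the whole device.
 */
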
/**
 * init_seen - allocate the PEB bitmap used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic bitmap.
 * @seen: bitmap of @ubi->peb_count bits
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
 */
static inline void set_seen(struct ubi_device *ubi, int pnum,
			    unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: bitmap of @ubi->peb_count bits
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

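/*
 * On-flash layout, in the order produced by ubi_write_fastmap() and
 * consumed by ubi_attach_fastmap():
 *
 *   ubi_fm_sb | ubi_fm_hdr | pool | WL pool |
 *   ubi_fm_ec entries (free, used, scrub, erase) |
 *   per volume: ubi_fm_volhdr followed by its ubi_fm_eba table
 *
 * The whole buffer is protected by a single CRC and padded up to a
 * multiple of the LEB size.
 */
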
/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success,
 * an ERR_PTR on failure.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

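		/*
		 * Classify the pool PEB by its VID header: an empty (0xFF)
		 * header means it was erased after the fastmap was written
		 * and is treated as free; a readable header means it now
		 * carries data which must be merged into the attach state.
		 */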
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			add_aeb(ai, free, pnum, ec, scrub);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EINVAL)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

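	/*
	 * The pools are scanned last: pool PEBs may have been written after
	 * the fastmap was created, so update_vol() lets them override the
	 * EBA state loaded above.
	 */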
	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *tmp_aeb, *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Move all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
		list_move_tail(&aeb->u.list, &ai->fastmap);

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

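	/*
	 * The stored CRC was computed with the data_crc field zeroed, so
	 * zero it before recomputing over the whole fastmap buffer.
	 */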
	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

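	/*
	 * Seal the image: the CRC is computed over the whole buffer with
	 * data_crc zeroed, matching the check in ubi_scan_fastmap().
	 */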
	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

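	/*
	 * ubi_io_sync_erase() returns the number of erasures performed,
	 * which is added to the old erase counter below.
	 */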
	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

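	/*
	 * Refill both pools before writing the new fastmap, so that the
	 * snapshot taken below records freshly stocked pools.
	 */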
	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

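	/*
	 * work_sem keeps background work from touching PEBs while the
	 * snapshot is taken; fm_eba_sem freezes the EBA tables that
	 * ubi_write_fastmap() serializes.
	 */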
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}