/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find the wear-leveling entry to be used as the
 * anchor PEB.
 * @root: the RB-tree to search
 *
 * Returns the free PEB with the lowest erase counter among those with
 * pnum < UBI_FM_MAX_START, or NULL if there is no such PEB.
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

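	/*
	 * Pool entries below pool->used have already been handed out;
	 * everything from pool->used up to pool->size is still unused
	 * and goes back to the free tree.
	 */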
	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

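/**
 * anchor_pebs_available - check whether a free PEB can serve as anchor PEB.
 * @root: RB-tree of free wear-leveling entries
 *
 * Returns 1 if the tree contains a PEB with pnum < UBI_FM_MAX_START, i.e.
 * one that fastmap could use as anchor PEB, and 0 otherwise.
 */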
static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the returned PEB will be used as anchor PEB by
 * fastmap
 *
 * The function returns a free physical eraseblock, an anchor candidate if
 * @anchor is set and an entry with a mean erase counter otherwise, and
 * removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list, the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

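/*
 * A minimal caller sketch (hypothetical, not taken from this file): the
 * entry has to be fetched under wl_lock, and a NULL return means that no
 * suitable free PEB was available:
 *
 *	spin_lock(&ubi->wl_lock);
 *	e = ubi_wl_get_fm_peb(ubi, 1);
 *	spin_unlock(&ubi->wl_lock);
 *	if (!e)
 *		return -ENOSPC;
 */
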
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

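	/*
	 * Take at most one PEB per pool and iteration until both pools are
	 * full (enough == 2) or the free tree cannot supply any more PEBs.
	 */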
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else {
			enough++;
		}

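		/*
		 * Leave the bad-PEB reserve plus a small safety margin of
		 * free PEBs untouched when filling the WL pool.
		 */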
		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else {
			enough++;
		}

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution
 * of pending works. This may be needed if, for example, the background
 * thread is disabled. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success
 * and a negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check for the WL pool because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
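		/*
		 * fm_eba_sem was dropped around ubi_update_fastmap() and is
		 * re-taken below, so this function returns with the
		 * semaphore held in read mode on both the success and the
		 * error path.
		 */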
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

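/*
 * A minimal caller sketch (hypothetical, not taken from this file): a
 * non-negative return value is the PEB number, and since the function
 * returns with fm_eba_sem held in read mode even on failure, the caller
 * has to release it when done:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... write to PEB pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */
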
/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this function
		 * is called in atomic context. Let's fail here and
		 * refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 *
 * see: ubi_wl_put_peb()
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap for the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

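	/*
	 * lnum 0 denotes the fastmap superblock, any other lnum a fastmap
	 * data PEB; pick the matching pseudo volume ID for the erase work.
	 */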
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	flush_work(&ubi->fm_work);
	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate to hand out
 * @root: RB-tree to test against
 *
 * See find_mean_wl_entry().
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
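	/*
	 * If fastmap is enabled but no fastmap has been written yet, do not
	 * hand out a PEB that could still serve as anchor PEB
	 * (pnum < UBI_FM_MAX_START); take the next entry from the tree
	 * instead. Note: this assumes the free tree holds more than one
	 * entry.
	 */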
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}