/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}

static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
	}
	return 0;
}

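/*
 * For illustration: the flat index above encodes
 * vlun.id = chnl_id * luns_per_chnl + lun_id. With luns_per_chnl = 4,
 * i = 9 yields lun_id = 9 % 4 = 1 and chnl_id = 9 / 4 = 2, i.e. the
 * second LUN on the third channel.
 */
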
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
								void *private)
{
	struct gen_nvm *gn = private;
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	ppa = dev_to_generic_addr(dev, ppa);
	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	/* the address of an array element is never NULL, so bound-check the
	 * table size up front instead of testing &blocks[i] in the loop
	 */
	if (nr_blocks > dev->blks_per_lun) {
		pr_err("gennvm: BB data is out of bounds.\n");
		return -EINVAL;
	}

	for (i = 0; i < nr_blocks; i++) {
		/* a zero entry denotes a usable block */
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		list_move_tail(&blk->list, &lun->bb_list);
	}

	return 0;
}

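/*
 * A note on the callback contract above (as suggested by the code, not
 * a spec quote): the device hands back one u8 per block for a single
 * LUN, where zero marks a usable block and any nonzero value marks it
 * bad, so the callback only has to move flagged blocks to bb_list.
 */
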
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->type) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->type = 1;
			lun->vlun.nr_free_blocks--;
		}
	}

	return 0;
}

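/*
 * Worked example for the address math above (illustrative numbers):
 * with sec_per_lun = 8192 and sec_per_blk = 64, pba = 17000 gives
 * lun_id = 17000 / 8192 = 2, an in-LUN offset of 17000 - 2 * 8192 =
 * 616 sectors, and thus block 616 / 64 = 9 within that LUN.
 */
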
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0))
				continue;

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev->q, ppa,
						dev->blks_per_lun,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if (dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: default block initialization\n");
		}
	}

	return 0;
}

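/*
 * Initialization order, as implemented above: every block first lands
 * on its LUN's free_list; the bad-block table (if exposed) then moves
 * factory-bad blocks to bb_list, and the L2P table (if available)
 * moves blocks that already hold mapped data to used_list. Whatever
 * remains on free_list is genuinely allocatable.
 */
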
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn)
		return -ENOMEM;

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1; /* a positive return tells the core the device was claimed */
err:
	kfree(gn);
	dev->mp = NULL;
	return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free blocks available\n",
							lun->vlun.id);
		spin_unlock(&vlun->lock);
		goto out;
	}

	/* non-GC allocations may not dip into the blocks reserved for GC */
	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
		spin_unlock(&vlun->lock);
		goto out;
	}

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->type = 1;

	lun->vlun.nr_free_blocks--;

	spin_unlock(&vlun->lock);
out:
	return blk;
}

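/*
 * Block states, as inferred from this file: blk->type is 0 for a free
 * block (on free_list), 1 for a block in use (on used_list) and 2 for
 * a block marked bad (destined for bb_list). gennvm_put_blk() below
 * returns a block to the list matching its type.
 */
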
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);

	switch (blk->type) {
	case 1:
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_free_blocks++;
		blk->type = 0;
		break;
	case 2:
		list_move_tail(&blk->list, &lun->bb_list);
		break;
	default:
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->type);
		list_move_tail(&blk->list, &lun->bb_list);
	}

	spin_unlock(&vlun->lock);
}

static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}

static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}

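/*
 * Two address formats are in play here: the generic format, where
 * channel/LUN/block/page occupy fixed ppa_addr fields, and the device
 * format, where the same fields are packed to the bit widths the
 * device reported. The helpers above convert a whole request between
 * the two, so every I/O is bracketed by a conversion.
 */
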
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return 0;

	/* Convert address space */
	gennvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	return dev->ops->submit_io(dev->q, rqd);
}

static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch >= dev->nr_chnls ||
					ppa->g.lun >= dev->luns_per_chnl ||
					ppa->g.blk >= dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u >= %u lun: %u >= %u blk: %u >= %u)\n",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(dev->luns_per_chnl * ppa->g.ch) + ppa->g.lun];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->type = type;
}

/* mark block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
		return;

	gennvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}

static int gennvm_end_io(struct nvm_rq *rqd, int error)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	case NVM_RSP_SUCCESS:
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
		/* fall through: a failed write still counts as an error */
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);

	return ret;
}

static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	int plane_cnt = 0, pl_idx, ret;
	struct ppa_addr addr;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	addr = block_to_ppa(dev, blk);

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = addr;
	} else {
		/* multi-plane devices erase the block on every plane at once */
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("gennvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			addr.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = addr;
		}
	}

	gennvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev->q, &rqd);

	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}

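/*
 * Worked example for the plane expansion above (illustrative): with
 * plane_mode == NVM_PLANE_DOUBLE, plane_cnt = 1 << plane_mode = 2, so
 * the erase goes out as a two-entry ppa_list whose entries differ only
 * in the g.pl field, erasing the block on both planes at once.
 */
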
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	return &gn->luns[lunid].vlun;
}

static void gennvm_free_blocks_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i)
		pr_info("%s: lun%8u\t%u\n",
					dev->name, i, lun->vlun.nr_free_blocks);
}

static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.end_io			= gennvm_end_io,
	.erase_blk		= gennvm_erase_blk,

	.get_lun		= gennvm_get_lun,
	.free_blocks_print	= gennvm_free_blocks_print,
};

static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void __exit gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");