/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}

static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_inuse_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
}

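/*
 * Callback for dev->ops->get_bb_tbl(). Blocks flagged non-zero in the device
 * bad block table are moved onto the owning lun's bad block list.
 */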
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
								void *private)
{
	struct gen_nvm *gn = private;
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	ppa = dev_to_generic_addr(gn->dev, ppa);
	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blocks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		if (!blk) {
			pr_err("gennvm: BB data is out of bounds.\n");
			return -EINVAL;
		}

		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
	}

	return 0;
}

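/*
 * Callback for dev->ops->get_l2p_tbl(). Every logical-to-physical entry
 * reported by the device pins the block backing that physical address as
 * in use; the FTL on top re-establishes the detailed block state later.
 */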
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->type) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->type = 1;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_inuse_blocks++;
		}
	}

	return 0;
}

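/*
 * Allocate and populate the per-lun block arrays: every block starts on the
 * free list, then the device bad block table and L2P table (when supported)
 * are replayed on top to mark bad and in-use blocks.
 */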
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.lun_id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev->q, ppa,
						dev->blks_per_lun,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if (dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: default block initialization\n");
		}
	}

	return 0;
}

static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn)
		return -ENOMEM;

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
}

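/*
 * Hand out a free block from @vlun. Non-GC requests are refused once only
 * the blocks reserved for garbage collection remain free.
 */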
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free blocks available",
							lun->vlun.id);
		spin_unlock(&vlun->lock);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
		spin_unlock(&vlun->lock);
		goto out;
	}

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->type = 1;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_inuse_blocks++;

	spin_unlock(&vlun->lock);
out:
	return blk;
}

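/*
 * Return a block to its lun: ordinary blocks (type 1) go back on the free
 * list, blocks marked bad (type 2) or of unknown type go on the bad block
 * list.
 */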
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);

	switch (blk->type) {
	case 1:
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_free_blocks++;
		lun->vlun.nr_inuse_blocks--;
		blk->type = 0;
		break;
	case 2:
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
		break;
	default:
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->type);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
	}

	spin_unlock(&vlun->lock);
}

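/*
 * The two helpers below translate every ppa in a request between the
 * device's own address format and the generic channel/lun/block/page
 * format used by the media manager.
 */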
static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}

static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return 0;

	/* Convert address space */
	gennvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	return dev->ops->submit_io(dev->q, rqd);
}

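/*
 * Tag the block addressed by @ppa with @type after sanity-checking that the
 * address lies within the device geometry.
 */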
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch > dev->nr_chnls ||
					ppa->g.lun > dev->luns_per_chnl ||
					ppa->g.blk > dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(ppa->g.ch * dev->luns_per_chnl) + ppa->g.lun];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->type = type;
}

/* Mark a block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
		return;

	gennvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}

static int gennvm_end_io(struct nvm_rq *rqd, int error)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	case NVM_RSP_SUCCESS:
		break;
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
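		/* fall through: a failed write also counts as an error */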
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);

	return ret;
}

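/*
 * Erase a block. For multi-plane devices a ppa is built for every plane so
 * the erase covers the whole block.
 */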
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	int plane_cnt = 0, pl_idx, ret;
	struct ppa_addr addr;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	addr = block_to_ppa(dev, blk);

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = addr;
	} else {
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("gennvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			addr.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = addr;
		}
	}

	gennvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev->q, &rqd);

	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	return &gn->luns[lunid].vlun;
}

static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_inuse_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}

static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.end_io			= gennvm_end_io,
	.erase_blk		= gennvm_erase_blk,

	.get_lun		= gennvm_get_lun,
	.lun_info_print		= gennvm_lun_info_print,
};

static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");