blob: dc83e010d08472d5534cd951fe0e2567e09360a2 [file] [log] [blame]
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/blkdev.h>
22#include <linux/blk-mq.h>
23#include <linux/list.h>
24#include <linux/types.h>
25#include <linux/sem.h>
26#include <linux/bitmap.h>
27#include <linux/module.h>
28#include <linux/miscdevice.h>
29#include <linux/lightnvm.h>
Matias Bjørling912761622016-01-12 07:49:21 +010030#include <linux/sched/sysctl.h>
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010031#include <uapi/linux/lightnvm.h>
32
33static LIST_HEAD(nvm_targets);
34static LIST_HEAD(nvm_mgrs);
35static LIST_HEAD(nvm_devices);
36static DECLARE_RWSEM(nvm_lock);
37
38static struct nvm_tgt_type *nvm_find_target_type(const char *name)
39{
40 struct nvm_tgt_type *tt;
41
42 list_for_each_entry(tt, &nvm_targets, list)
43 if (!strcmp(name, tt->name))
44 return tt;
45
46 return NULL;
47}
48
49int nvm_register_target(struct nvm_tgt_type *tt)
50{
51 int ret = 0;
52
53 down_write(&nvm_lock);
54 if (nvm_find_target_type(tt->name))
55 ret = -EEXIST;
56 else
57 list_add(&tt->list, &nvm_targets);
58 up_write(&nvm_lock);
59
60 return ret;
61}
62EXPORT_SYMBOL(nvm_register_target);
63
64void nvm_unregister_target(struct nvm_tgt_type *tt)
65{
66 if (!tt)
67 return;
68
69 down_write(&nvm_lock);
70 list_del(&tt->list);
71 up_write(&nvm_lock);
72}
73EXPORT_SYMBOL(nvm_unregister_target);
74
75void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
76 dma_addr_t *dma_handler)
77{
Matias Bjørling16f26c32015-12-06 11:25:48 +010078 return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010079 dma_handler);
80}
81EXPORT_SYMBOL(nvm_dev_dma_alloc);
82
83void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
84 dma_addr_t dma_handler)
85{
86 dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
87}
88EXPORT_SYMBOL(nvm_dev_dma_free);
89
90static struct nvmm_type *nvm_find_mgr_type(const char *name)
91{
92 struct nvmm_type *mt;
93
94 list_for_each_entry(mt, &nvm_mgrs, list)
95 if (!strcmp(name, mt->name))
96 return mt;
97
98 return NULL;
99}
100
Matias Bjørling762796b2015-12-06 11:25:49 +0100101struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
102{
103 struct nvmm_type *mt;
104 int ret;
105
106 lockdep_assert_held(&nvm_lock);
107
108 list_for_each_entry(mt, &nvm_mgrs, list) {
109 ret = mt->register_mgr(dev);
110 if (ret < 0) {
111 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
112 ret, dev->name);
113 return NULL; /* initialization failed */
114 } else if (ret > 0)
115 return mt;
116 }
117
118 return NULL;
119}
120
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100121int nvm_register_mgr(struct nvmm_type *mt)
122{
Matias Bjørling762796b2015-12-06 11:25:49 +0100123 struct nvm_dev *dev;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100124 int ret = 0;
125
126 down_write(&nvm_lock);
Matias Bjørling762796b2015-12-06 11:25:49 +0100127 if (nvm_find_mgr_type(mt->name)) {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100128 ret = -EEXIST;
Matias Bjørling762796b2015-12-06 11:25:49 +0100129 goto finish;
130 } else {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100131 list_add(&mt->list, &nvm_mgrs);
Matias Bjørling762796b2015-12-06 11:25:49 +0100132 }
133
134 /* try to register media mgr if any device have none configured */
135 list_for_each_entry(dev, &nvm_devices, devices) {
136 if (dev->mt)
137 continue;
138
139 dev->mt = nvm_init_mgr(dev);
140 }
141finish:
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100142 up_write(&nvm_lock);
143
144 return ret;
145}
146EXPORT_SYMBOL(nvm_register_mgr);
147
148void nvm_unregister_mgr(struct nvmm_type *mt)
149{
150 if (!mt)
151 return;
152
153 down_write(&nvm_lock);
154 list_del(&mt->list);
155 up_write(&nvm_lock);
156}
157EXPORT_SYMBOL(nvm_unregister_mgr);
158
159static struct nvm_dev *nvm_find_nvm_dev(const char *name)
160{
161 struct nvm_dev *dev;
162
163 list_for_each_entry(dev, &nvm_devices, devices)
164 if (!strcmp(name, dev->name))
165 return dev;
166
167 return NULL;
168}
169
170struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
171 unsigned long flags)
172{
173 return dev->mt->get_blk(dev, lun, flags);
174}
175EXPORT_SYMBOL(nvm_get_blk);
176
177/* Assumes that all valid pages have already been moved on release to bm */
178void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
179{
180 return dev->mt->put_blk(dev, blk);
181}
182EXPORT_SYMBOL(nvm_put_blk);
183
184int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
185{
186 return dev->mt->submit_io(dev, rqd);
187}
188EXPORT_SYMBOL(nvm_submit_io);
189
190int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
191{
192 return dev->mt->erase_blk(dev, blk, 0);
193}
194EXPORT_SYMBOL(nvm_erase_blk);
195
Matias Bjørling069368e2016-01-12 07:49:19 +0100196void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
197{
198 int i;
199
200 if (rqd->nr_pages > 1) {
201 for (i = 0; i < rqd->nr_pages; i++)
202 rqd->ppa_list[i] = dev_to_generic_addr(dev,
203 rqd->ppa_list[i]);
204 } else {
205 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
206 }
207}
208EXPORT_SYMBOL(nvm_addr_to_generic_mode);
209
210void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
211{
212 int i;
213
214 if (rqd->nr_pages > 1) {
215 for (i = 0; i < rqd->nr_pages; i++)
216 rqd->ppa_list[i] = generic_to_dev_addr(dev,
217 rqd->ppa_list[i]);
218 } else {
219 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
220 }
221}
222EXPORT_SYMBOL(nvm_generic_to_addr_mode);
223
/*
 * Populate @rqd with a physical-address list covering @nr_ppas addresses,
 * replicated across the device's planes.
 *
 * For the single-plane/single-address case the address is stored inline in
 * rqd->ppa_addr and no allocation happens. Otherwise a DMA-able ppa_list is
 * allocated and filled with one copy of each address per plane.
 *
 * NOTE(review): the expansion loop writes ppas[i].g.pl in the caller's
 * array, so on return the caller's addresses carry the last plane index —
 * confirm callers do not rely on the original .g.pl values.
 *
 * Returns 0 on success, -EINVAL if the expanded list would exceed the
 * device's max_phys_sect, or -ENOMEM on DMA allocation failure.
 */
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			struct ppa_addr *ppas, int nr_ppas)
{
	int i, plane_cnt, pl_idx;

	/* fast path: one address on a single-plane device needs no list */
	if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_pages = 1;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	plane_cnt = (1 << dev->plane_mode);
	rqd->nr_pages = plane_cnt * nr_ppas;

	/* reject requests the device cannot carry in one command */
	if (dev->ops->max_phys_sect < rqd->nr_pages)
		return -EINVAL;

	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	/* replicate each address once per plane index */
	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
		for (i = 0; i < nr_ppas; i++) {
			ppas[i].g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
258
259void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
260{
261 if (!rqd->ppa_list)
262 return;
263
264 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
265}
266EXPORT_SYMBOL(nvm_free_rqd_ppalist);
267
/*
 * Erase the blocks addressed by @ppas through the device's erase_block op.
 * Devices without an erase_block callback treat this as a successful no-op.
 * Returns 0 or a negative errno from list setup or the device op.
 */
int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	/* expands @ppas across planes; may allocate a DMA ppa list */
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
	if (ret)
		return ret;

	/* device ops expect device-format addresses */
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);
291
/* Record the completion status and invoke the request's end_io callback. */
void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
298
/*
 * end_io callback used by nvm_submit_ppa() to wake the synchronous waiter.
 * rqd->wait is cleared before completing so the completion fires once.
 */
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}
307
Matias Bjørling09719b62016-01-12 07:49:30 +0100308int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
309 int opcode, int flags, void *buf, int len)
310{
311 DECLARE_COMPLETION_ONSTACK(wait);
312 struct nvm_rq rqd;
313 struct bio *bio;
314 int ret;
315 unsigned long hang_check;
316
317 bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
318 if (IS_ERR_OR_NULL(bio))
319 return -ENOMEM;
320
321 memset(&rqd, 0, sizeof(struct nvm_rq));
322 ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
323 if (ret) {
324 bio_put(bio);
325 return ret;
326 }
327
328 rqd.opcode = opcode;
329 rqd.bio = bio;
330 rqd.wait = &wait;
331 rqd.dev = dev;
332 rqd.end_io = nvm_end_io_sync;
333 rqd.flags = flags;
334 nvm_generic_to_addr_mode(dev, &rqd);
335
336 ret = dev->ops->submit_io(dev, &rqd);
337
338 /* Prevent hang_check timer from firing at us during very long I/O */
339 hang_check = sysctl_hung_task_timeout_secs;
340 if (hang_check)
341 while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
342 else
343 wait_for_completion_io(&wait);
344
345 nvm_free_rqd_ppalist(dev, &rqd);
346
347 return rqd.error;
348}
349EXPORT_SYMBOL(nvm_submit_ppa);
350
/*
 * Derive the device geometry from the identity data of the first group.
 * Only mtype 0 and fmtype 0/1 are accepted; plane mode is taken from the
 * mpos bitmask. Returns 0 or -EINVAL for unsupported media.
 */
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	if (grp->fmtype != 0 && grp->fmtype != 1) {
		pr_err("nvm: flash type not supported\n");
		return -EINVAL;
	}

	/* mpos bits select the widest supported plane access mode */
	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_blocks = dev->nr_planes *
				dev->blks_per_lun *
				dev->luns_per_chnl *
				dev->nr_chnls;
	dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
	INIT_LIST_HEAD(&dev->online_targets);

	return 0;
}
400
/*
 * Detach the media manager from @dev, if one is attached.
 * Does not free @dev itself — the caller owns that allocation.
 */
static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->mt)
		dev->mt->unregister_mgr(dev);
}
409
410static int nvm_init(struct nvm_dev *dev)
411{
Wenwei Tao480fc0d2015-11-20 13:47:53 +0100412 int ret = -EINVAL;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100413
414 if (!dev->q || !dev->ops)
Wenwei Tao480fc0d2015-11-20 13:47:53 +0100415 return ret;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100416
Matias Bjørling16f26c32015-12-06 11:25:48 +0100417 if (dev->ops->identity(dev, &dev->identity)) {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100418 pr_err("nvm: device could not be identified\n");
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100419 goto err;
420 }
421
422 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
423 dev->identity.ver_id, dev->identity.vmnt,
424 dev->identity.cgrps);
425
426 if (dev->identity.ver_id != 1) {
427 pr_err("nvm: device not supported by kernel.");
428 goto err;
429 }
430
431 if (dev->identity.cgrps != 1) {
432 pr_err("nvm: only one group configuration supported.");
433 goto err;
434 }
435
436 ret = nvm_core_init(dev);
437 if (ret) {
438 pr_err("nvm: could not initialize core structures.\n");
439 goto err;
440 }
441
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100442 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
443 dev->name, dev->sec_per_pg, dev->nr_planes,
444 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
445 dev->nr_chnls);
446 return 0;
447err:
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100448 pr_err("nvm: failed to initialize nvm\n");
449 return ret;
450}
451
/* Release device resources: destroy the DMA pool, then detach the manager. */
static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->ppalist_pool)
		dev->ops->destroy_dma_pool(dev->ppalist_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}
460
461int nvm_register(struct request_queue *q, char *disk_name,
462 struct nvm_dev_ops *ops)
463{
464 struct nvm_dev *dev;
465 int ret;
466
467 if (!ops->identity)
468 return -EINVAL;
469
470 dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
471 if (!dev)
472 return -ENOMEM;
473
474 dev->q = q;
475 dev->ops = ops;
476 strncpy(dev->name, disk_name, DISK_NAME_LEN);
477
478 ret = nvm_init(dev);
479 if (ret)
480 goto err_init;
481
Wenwei Taod1601472015-11-28 16:49:25 +0100482 if (dev->ops->max_phys_sect > 256) {
483 pr_info("nvm: max sectors supported is 256.\n");
484 ret = -EINVAL;
485 goto err_init;
486 }
487
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100488 if (dev->ops->max_phys_sect > 1) {
Matias Bjørling16f26c32015-12-06 11:25:48 +0100489 dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100490 if (!dev->ppalist_pool) {
491 pr_err("nvm: could not create ppa pool\n");
Matias Bjørling93e70c12015-11-20 13:47:54 +0100492 ret = -ENOMEM;
493 goto err_init;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100494 }
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100495 }
496
Matias Bjørling762796b2015-12-06 11:25:49 +0100497 /* register device with a supported media manager */
Matias Bjørlingedad2e62015-11-16 15:34:42 +0100498 down_write(&nvm_lock);
Matias Bjørling762796b2015-12-06 11:25:49 +0100499 dev->mt = nvm_init_mgr(dev);
Matias Bjørlingedad2e62015-11-16 15:34:42 +0100500 list_add(&dev->devices, &nvm_devices);
501 up_write(&nvm_lock);
502
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100503 return 0;
504err_init:
505 kfree(dev);
506 return ret;
507}
508EXPORT_SYMBOL(nvm_register);
509
/*
 * Remove the device named @disk_name from the subsystem.
 * The device is unlinked under nvm_lock; the actual teardown (nvm_exit)
 * runs after the lock is dropped.
 */
void nvm_unregister(char *disk_name)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(disk_name);
	if (!dev) {
		pr_err("nvm: could not find device %s to unregister\n",
								disk_name);
		up_write(&nvm_lock);
		return;
	}

	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
	kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
530
/* Targets expose plain block devices; no extra device operations needed. */
static const struct block_device_operations nvm_fops = {
	.owner = THIS_MODULE,
};
534
535static int nvm_create_target(struct nvm_dev *dev,
536 struct nvm_ioctl_create *create)
537{
538 struct nvm_ioctl_create_simple *s = &create->conf.s;
539 struct request_queue *tqueue;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100540 struct gendisk *tdisk;
541 struct nvm_tgt_type *tt;
542 struct nvm_target *t;
543 void *targetdata;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100544
545 if (!dev->mt) {
Matias Bjørling762796b2015-12-06 11:25:49 +0100546 pr_info("nvm: device has no media manager registered.\n");
547 return -ENODEV;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100548 }
549
Matias Bjørling762796b2015-12-06 11:25:49 +0100550 down_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100551 tt = nvm_find_target_type(create->tgttype);
552 if (!tt) {
553 pr_err("nvm: target type %s not found\n", create->tgttype);
Wenwei Taod0a712c2015-11-28 16:49:28 +0100554 up_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100555 return -EINVAL;
556 }
557
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100558 list_for_each_entry(t, &dev->online_targets, list) {
559 if (!strcmp(create->tgtname, t->disk->disk_name)) {
560 pr_err("nvm: target name already exists.\n");
561 up_write(&nvm_lock);
562 return -EINVAL;
563 }
564 }
565 up_write(&nvm_lock);
566
567 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
568 if (!t)
569 return -ENOMEM;
570
571 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
572 if (!tqueue)
573 goto err_t;
574 blk_queue_make_request(tqueue, tt->make_rq);
575
576 tdisk = alloc_disk(0);
577 if (!tdisk)
578 goto err_queue;
579
580 sprintf(tdisk->disk_name, "%s", create->tgtname);
581 tdisk->flags = GENHD_FL_EXT_DEVT;
582 tdisk->major = 0;
583 tdisk->first_minor = 0;
584 tdisk->fops = &nvm_fops;
585 tdisk->queue = tqueue;
586
587 targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
588 if (IS_ERR(targetdata))
589 goto err_init;
590
591 tdisk->private_data = targetdata;
592 tqueue->queuedata = targetdata;
593
594 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
595
596 set_capacity(tdisk, tt->capacity(targetdata));
597 add_disk(tdisk);
598
599 t->type = tt;
600 t->disk = tdisk;
601
602 down_write(&nvm_lock);
603 list_add_tail(&t->list, &dev->online_targets);
604 up_write(&nvm_lock);
605
606 return 0;
607err_init:
608 put_disk(tdisk);
609err_queue:
610 blk_cleanup_queue(tqueue);
611err_t:
612 kfree(t);
613 return -ENOMEM;
614}
615
/*
 * Tear down a target: detach the gendisk, drain and free its queue, let
 * the target type release its private data, then unlink and free the
 * target entry. Order matters — the disk must be removed before tt->exit
 * frees the data the queue still references. Caller holds nvm_lock
 * (asserted below).
 */
static void nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	lockdep_assert_held(&nvm_lock);

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}
635
/*
 * Validate a create request and hand it to nvm_create_target().
 *
 * NOTE(review): the device pointer is looked up under nvm_lock but used
 * after the lock is dropped — a concurrent nvm_unregister() could free it;
 * confirm that callers serialize create against unregister.
 */
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	/* only the "simple" LUN-range configuration is supported */
	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return nvm_create_target(dev, create);
}
663
664static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
665{
666 struct nvm_target *t = NULL;
667 struct nvm_dev *dev;
668 int ret = -1;
669
670 down_write(&nvm_lock);
671 list_for_each_entry(dev, &nvm_devices, devices)
672 list_for_each_entry(t, &dev->online_targets, list) {
673 if (!strcmp(remove->tgtname, t->disk->disk_name)) {
674 nvm_remove_target(t);
675 ret = 0;
676 break;
677 }
678 }
679 up_write(&nvm_lock);
680
681 if (ret) {
682 pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
683 return -EINVAL;
684 }
685
686 return 0;
687}
688
689#ifdef CONFIG_NVM_DEBUG
690static int nvm_configure_show(const char *val)
691{
692 struct nvm_dev *dev;
693 char opcode, devname[DISK_NAME_LEN];
694 int ret;
695
696 ret = sscanf(val, "%c %32s", &opcode, devname);
697 if (ret != 2) {
698 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
699 return -EINVAL;
700 }
701
Wenwei Taod0a712c2015-11-28 16:49:28 +0100702 down_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100703 dev = nvm_find_nvm_dev(devname);
Wenwei Taod0a712c2015-11-28 16:49:28 +0100704 up_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100705 if (!dev) {
706 pr_err("nvm: device not found\n");
707 return -EINVAL;
708 }
709
710 if (!dev->mt)
711 return 0;
712
Javier Gonzalez2fde0e42015-11-20 13:47:57 +0100713 dev->mt->lun_info_print(dev);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100714
715 return 0;
716}
717
718static int nvm_configure_remove(const char *val)
719{
720 struct nvm_ioctl_remove remove;
721 char opcode;
722 int ret;
723
724 ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
725 if (ret != 2) {
726 pr_err("nvm: invalid command. Use \"d targetname\".\n");
727 return -EINVAL;
728 }
729
730 remove.flags = 0;
731
732 return __nvm_configure_remove(&remove);
733}
734
735static int nvm_configure_create(const char *val)
736{
737 struct nvm_ioctl_create create;
738 char opcode;
739 int lun_begin, lun_end, ret;
740
741 ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
742 create.tgtname, create.tgttype,
743 &lun_begin, &lun_end);
744 if (ret != 6) {
745 pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
746 return -EINVAL;
747 }
748
749 create.flags = 0;
750 create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
751 create.conf.s.lun_begin = lun_begin;
752 create.conf.s.lun_end = lun_end;
753
754 return __nvm_configure_create(&create);
755}
756
757
758/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
759static int nvm_configure_by_str_event(const char *val,
760 const struct kernel_param *kp)
761{
762 char opcode;
763 int ret;
764
765 ret = sscanf(val, "%c", &opcode);
766 if (ret != 1) {
767 pr_err("nvm: string must have the format of \"cmd ...\"\n");
768 return -EINVAL;
769 }
770
771 switch (opcode) {
772 case 'a':
773 return nvm_configure_create(val);
774 case 'd':
775 return nvm_configure_remove(val);
776 case 's':
777 return nvm_configure_show(val);
778 default:
779 pr_err("nvm: invalid command\n");
780 return -EINVAL;
781 }
782
783 return 0;
784}
785
786static int nvm_configure_get(char *buf, const struct kernel_param *kp)
787{
788 int sz = 0;
789 char *buf_start = buf;
790 struct nvm_dev *dev;
791
792 buf += sprintf(buf, "available devices:\n");
793 down_write(&nvm_lock);
794 list_for_each_entry(dev, &nvm_devices, devices) {
795 if (sz > 4095 - DISK_NAME_LEN)
796 break;
797 buf += sprintf(buf, " %32s\n", dev->name);
798 }
799 up_write(&nvm_lock);
800
801 return buf - buf_start - 1;
802}
803
/* Hook the debug set/get handlers into the module-parameter machinery. */
static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set = nvm_configure_by_str_event,
	.get = nvm_configure_get,
};
808
809#undef MODULE_PARAM_PREFIX
810#define MODULE_PARAM_PREFIX "lnvm."
811
812module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
813 0644);
814
815#endif /* CONFIG_NVM_DEBUG */
816
817static long nvm_ioctl_info(struct file *file, void __user *arg)
818{
819 struct nvm_ioctl_info *info;
820 struct nvm_tgt_type *tt;
821 int tgt_iter = 0;
822
823 if (!capable(CAP_SYS_ADMIN))
824 return -EPERM;
825
826 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
827 if (IS_ERR(info))
828 return -EFAULT;
829
830 info->version[0] = NVM_VERSION_MAJOR;
831 info->version[1] = NVM_VERSION_MINOR;
832 info->version[2] = NVM_VERSION_PATCH;
833
834 down_write(&nvm_lock);
835 list_for_each_entry(tt, &nvm_targets, list) {
836 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
837
838 tgt->version[0] = tt->version[0];
839 tgt->version[1] = tt->version[1];
840 tgt->version[2] = tt->version[2];
841 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
842
843 tgt_iter++;
844 }
845
846 info->tgtsize = tgt_iter;
847 up_write(&nvm_lock);
848
Sudip Mukherjee76e25082015-11-28 16:49:24 +0100849 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
850 kfree(info);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100851 return -EFAULT;
Sudip Mukherjee76e25082015-11-28 16:49:24 +0100852 }
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100853
854 kfree(info);
855 return 0;
856}
857
/*
 * NVM_GET_DEVICES ioctl: report registered devices and their media
 * managers to userspace.
 *
 * NOTE(review): the break fires after 32 entries have been filled while
 * the message says "max 31" — confirm against the uapi info[] array bound.
 */
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			/* no media manager attached to this device */
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
904
905static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
906{
907 struct nvm_ioctl_create create;
908
909 if (!capable(CAP_SYS_ADMIN))
910 return -EPERM;
911
912 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
913 return -EFAULT;
914
915 create.dev[DISK_NAME_LEN - 1] = '\0';
916 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
917 create.tgtname[DISK_NAME_LEN - 1] = '\0';
918
919 if (create.flags != 0) {
920 pr_err("nvm: no flags supported\n");
921 return -EINVAL;
922 }
923
924 return __nvm_configure_create(&create);
925}
926
927static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
928{
929 struct nvm_ioctl_remove remove;
930
931 if (!capable(CAP_SYS_ADMIN))
932 return -EPERM;
933
934 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
935 return -EFAULT;
936
937 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
938
939 if (remove.flags != 0) {
940 pr_err("nvm: no flags supported\n");
941 return -EINVAL;
942 }
943
944 return __nvm_configure_remove(&remove);
945}
946
947static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
948{
949 void __user *argp = (void __user *)arg;
950
951 switch (cmd) {
952 case NVM_INFO:
953 return nvm_ioctl_info(file, argp);
954 case NVM_GET_DEVICES:
955 return nvm_ioctl_get_devices(file, argp);
956 case NVM_DEV_CREATE:
957 return nvm_ioctl_dev_create(file, argp);
958 case NVM_DEV_REMOVE:
959 return nvm_ioctl_dev_remove(file, argp);
960 }
961 return 0;
962}
963
/* Control-node file operations: ioctl only, no seeking. */
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
970
/* Misc device backing /dev/lightnvm/control for the management ioctls. */
static struct miscdevice _nvm_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lightnvm",
	.nodename = "lightnvm/control",
	.fops = &_ctl_fops,
};
977
978MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
979
980static int __init nvm_mod_init(void)
981{
982 int ret;
983
984 ret = misc_register(&_nvm_misc);
985 if (ret)
986 pr_err("nvm: misc_register failed for control device");
987
988 return ret;
989}
990
/* Module exit: remove the control misc device. */
static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}
995
996MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
997MODULE_LICENSE("GPL v2");
998MODULE_VERSION("0.1");
999module_init(nvm_mod_init);
1000module_exit(nvm_mod_exit);