blob: 8beb9c04d8981e40b8870b917f468f7253bdd71a [file] [log] [blame]
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/blkdev.h>
22#include <linux/blk-mq.h>
23#include <linux/list.h>
24#include <linux/types.h>
25#include <linux/sem.h>
26#include <linux/bitmap.h>
27#include <linux/module.h>
28#include <linux/miscdevice.h>
29#include <linux/lightnvm.h>
Matias Bjørling912761622016-01-12 07:49:21 +010030#include <linux/sched/sysctl.h>
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010031#include <uapi/linux/lightnvm.h>
32
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020033static LIST_HEAD(nvm_tgt_types);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010034static LIST_HEAD(nvm_mgrs);
35static LIST_HEAD(nvm_devices);
Simon A. F. Lund6f8645c2016-05-06 20:03:03 +020036static LIST_HEAD(nvm_targets);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010037static DECLARE_RWSEM(nvm_lock);
38
Simon A. F. Lund6f8645c2016-05-06 20:03:03 +020039static struct nvm_target *nvm_find_target(const char *name)
40{
41 struct nvm_target *tgt;
42
43 list_for_each_entry(tgt, &nvm_targets, list)
44 if (!strcmp(name, tgt->disk->disk_name))
45 return tgt;
46
47 return NULL;
48}
49
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010050static struct nvm_tgt_type *nvm_find_target_type(const char *name)
51{
52 struct nvm_tgt_type *tt;
53
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020054 list_for_each_entry(tt, &nvm_tgt_types, list)
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010055 if (!strcmp(name, tt->name))
56 return tt;
57
58 return NULL;
59}
60
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020061int nvm_register_tgt_type(struct nvm_tgt_type *tt)
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010062{
63 int ret = 0;
64
65 down_write(&nvm_lock);
66 if (nvm_find_target_type(tt->name))
67 ret = -EEXIST;
68 else
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020069 list_add(&tt->list, &nvm_tgt_types);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010070 up_write(&nvm_lock);
71
72 return ret;
73}
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020074EXPORT_SYMBOL(nvm_register_tgt_type);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010075
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020076void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010077{
78 if (!tt)
79 return;
80
81 down_write(&nvm_lock);
82 list_del(&tt->list);
83 up_write(&nvm_lock);
84}
Simon A. F. Lund6063fe32016-05-06 20:03:02 +020085EXPORT_SYMBOL(nvm_unregister_tgt_type);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +010086
/*
 * Allocate a DMA-able buffer from the device's ppa-list pool.
 * @dma_handler receives the bus address. Delegates to the device ops.
 */
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
94
/* Return a buffer obtained from nvm_dev_dma_alloc() to the device pool. */
void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
101
102static struct nvmm_type *nvm_find_mgr_type(const char *name)
103{
104 struct nvmm_type *mt;
105
106 list_for_each_entry(mt, &nvm_mgrs, list)
107 if (!strcmp(name, mt->name))
108 return mt;
109
110 return NULL;
111}
112
/*
 * Try to attach a media manager to @dev, matching the mmtype recorded in the
 * device's system block.
 *
 * register_mgr() return protocol: < 0 is a hard failure (abort the scan),
 * > 0 means the manager claimed the device, and any other value means it
 * declined, so the scan continues.
 *
 * Returns the media manager on success, NULL otherwise. Caller holds
 * nvm_lock (asserted below).
 */
static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		/* only consider the manager named in the system block */
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}
135
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100136int nvm_register_mgr(struct nvmm_type *mt)
137{
Matias Bjørling762796b2015-12-06 11:25:49 +0100138 struct nvm_dev *dev;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100139 int ret = 0;
140
141 down_write(&nvm_lock);
Matias Bjørling762796b2015-12-06 11:25:49 +0100142 if (nvm_find_mgr_type(mt->name)) {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100143 ret = -EEXIST;
Matias Bjørling762796b2015-12-06 11:25:49 +0100144 goto finish;
145 } else {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100146 list_add(&mt->list, &nvm_mgrs);
Matias Bjørling762796b2015-12-06 11:25:49 +0100147 }
148
149 /* try to register media mgr if any device have none configured */
150 list_for_each_entry(dev, &nvm_devices, devices) {
151 if (dev->mt)
152 continue;
153
154 dev->mt = nvm_init_mgr(dev);
155 }
156finish:
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100157 up_write(&nvm_lock);
158
159 return ret;
160}
161EXPORT_SYMBOL(nvm_register_mgr);
162
163void nvm_unregister_mgr(struct nvmm_type *mt)
164{
165 if (!mt)
166 return;
167
168 down_write(&nvm_lock);
169 list_del(&mt->list);
170 up_write(&nvm_lock);
171}
172EXPORT_SYMBOL(nvm_unregister_mgr);
173
174static struct nvm_dev *nvm_find_nvm_dev(const char *name)
175{
176 struct nvm_dev *dev;
177
178 list_for_each_entry(dev, &nvm_devices, devices)
179 if (!strcmp(name, dev->name))
180 return dev;
181
182 return NULL;
183}
184
/* Get a free block from @lun via the media manager, lockless variant. */
struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk_unlocked(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk_unlocked);
191
/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk_unlocked(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk_unlocked);
198
/* Get a free block from @lun via the media manager (locking variant). */
struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);
205
/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);
212
/* Mark the block containing @ppa with @type (e.g. bad/grown-bad) via the mm. */
void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);
218
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100219int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
220{
221 return dev->mt->submit_io(dev, rqd);
222}
223EXPORT_SYMBOL(nvm_submit_io);
224
/* Erase @blk through the media manager (flags fixed to 0). */
int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);
230
Matias Bjørling069368e2016-01-12 07:49:19 +0100231void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
232{
233 int i;
234
Javier González6d5be952016-05-06 20:03:20 +0200235 if (rqd->nr_ppas > 1) {
236 for (i = 0; i < rqd->nr_ppas; i++)
Matias Bjørling069368e2016-01-12 07:49:19 +0100237 rqd->ppa_list[i] = dev_to_generic_addr(dev,
238 rqd->ppa_list[i]);
239 } else {
240 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
241 }
242}
243EXPORT_SYMBOL(nvm_addr_to_generic_mode);
244
245void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
246{
247 int i;
248
Javier González6d5be952016-05-06 20:03:20 +0200249 if (rqd->nr_ppas > 1) {
250 for (i = 0; i < rqd->nr_ppas; i++)
Matias Bjørling069368e2016-01-12 07:49:19 +0100251 rqd->ppa_list[i] = generic_to_dev_addr(dev,
252 rqd->ppa_list[i]);
253 } else {
254 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
255 }
256}
257EXPORT_SYMBOL(nvm_generic_to_addr_mode);
258
/*
 * Attach the ppa list @ppas to @rqd.
 *
 * A single address that needs no plane unfolding is stored inline in
 * rqd->ppa_addr. Otherwise a DMA-able list is allocated and, when @vblk is
 * set, each virtual-block address is unfolded into one entry per plane.
 * Note that unfolding writes the plane index into ppas[i].g.pl, so @ppas is
 * modified by this call.
 *
 * Returns 0, or -ENOMEM if the DMA allocation fails.
 */
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		/* caller-supplied addresses are used verbatim */
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		/* list layout groups by plane: all plane-0 ppas, then plane-1, ... */
		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppas[i].g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
296
297void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
298{
299 if (!rqd->ppa_list)
300 return;
301
302 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
303}
304EXPORT_SYMBOL(nvm_free_rqd_ppalist);
305
/*
 * Erase the blocks addressed by @ppas (generic format, @nr_ppas entries).
 * Addresses are plane-unfolded, converted to device format and passed to the
 * device's erase_block op. Devices without an erase op succeed trivially.
 *
 * Returns 0 on success or a negative errno.
 */
int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	/* vblk=1: unfold each address across all planes */
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);
329
/* Record the completion status and invoke the request's end_io callback. */
void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
336
/*
 * end_io callback for synchronous submission: wakes the waiter parked in
 * __nvm_submit_ppa(). rqd->wait is cleared before completing so the
 * completion is signalled exactly once.
 */
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}
345
/*
 * Submit @rqd synchronously: map @buf into a bio, convert addresses to
 * device format, issue the request and sleep until completion.
 *
 * Returns the device-reported rqd->error on completion, -ENOMEM if the bio
 * mapping fails, or the submit_io error (after dropping the bio).
 */
int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;	/* completes &wait below */

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}
382
/**
 * nvm_submit_ppa_list - submit a user-defined ppa list to the device. The
 * caller retains ownership of @ppa_list and must free it if necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 *
 * Returns -EINVAL when @nr_ppas exceeds the device limit, otherwise the
 * synchronous submission result.
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
413
/**
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 * as single, dual, quad plane PPAs depending on device type.
 * @dev: device
 * @ppa: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 *
 * Unlike nvm_submit_ppa_list(), this allocates (and frees) the DMA ppa list
 * itself via nvm_set_rqd_ppalist(). Note that unfolding modifies @ppa.
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
443
Matias Bjørling22e8c972016-05-06 20:02:58 +0200444/*
445 * folds a bad block list from its plane representation to its virtual
446 * block representation. The fold is done in place and reduced size is
447 * returned.
448 *
449 * If any of the planes status are bad or grown bad block, the virtual block
450 * is marked bad. If not bad, the first plane state acts as the block state.
451 */
452int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
453{
454 int blk, offset, pl, blktype;
455
456 if (nr_blks != dev->blks_per_lun * dev->plane_mode)
457 return -EINVAL;
458
459 for (blk = 0; blk < dev->blks_per_lun; blk++) {
460 offset = blk * dev->plane_mode;
461 blktype = blks[offset];
462
463 /* Bad blocks on any planes take precedence over other types */
464 for (pl = 0; pl < dev->plane_mode; pl++) {
465 if (blks[offset + pl] &
466 (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
467 blktype = blks[offset + pl];
468 break;
469 }
470 }
471
472 blks[blk] = blktype;
473 }
474
475 return dev->blks_per_lun;
476}
477EXPORT_SYMBOL(nvm_bb_tbl_fold);
478
/*
 * Fetch the bad block table for the LUN addressed by @ppa (generic format)
 * into @blks, converting the address to device format first.
 */
int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);
486
Matias Bjørlingca5927e2016-01-12 07:49:35 +0100487static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
488{
489 int i;
490
491 dev->lps_per_blk = dev->pgs_per_blk;
492 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
493 if (!dev->lptbl)
494 return -ENOMEM;
495
496 /* Just a linear array */
497 for (i = 0; i < dev->lps_per_blk; i++)
498 dev->lptbl[i] = i;
499
500 return 0;
501}
502
/*
 * Build the lower-page table for MLC media from the identify data.
 * Returns 0 (also when the device reports no pair information) or -ENOMEM.
 */
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];		/* two entries packed per byte */
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
531
/*
 * Populate the nvm_dev geometry and derived values from the identify data
 * (only the first group is used; nvm_init() enforces cgrps == 1) and
 * allocate the lun map and lower-page table.
 *
 * Returns 0, -EINVAL for unsupported memory/flash types, or -ENOMEM.
 */
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;	/* full page across planes */
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	/* mpos bitmask advertises the supported plane access modes */
	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	/* build the lower-page table according to the flash cell type */
	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
604
/*
 * Tear down a target: remove the gendisk, drain and free its queue, run the
 * target type's exit hook and release the target. The order matters: the
 * disk must be gone before private data is freed. Caller holds nvm_lock
 * (asserted below).
 */
static void nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	lockdep_assert_held(&nvm_lock);

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}
624
/*
 * Detach the media manager from @dev: first remove every target built on the
 * device, then unregister the manager itself. No-op if no manager is set.
 */
static void nvm_free_mgr(struct nvm_dev *dev)
{
	struct nvm_target *tgt, *tmp;

	if (!dev->mt)
		return;

	down_write(&nvm_lock);
	list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) {
		if (tgt->dev != dev)
			continue;

		nvm_remove_target(tgt);
	}
	up_write(&nvm_lock);

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}
644
/*
 * Release all per-device resources: media manager (and its targets) first,
 * then the lower-page table and lun map. NULL is a no-op. Does not free
 * @dev itself — the caller owns that allocation.
 */
static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
}
655
/*
 * Identify the device and initialize its core structures. Only identify
 * version 1 devices with a single group configuration are supported.
 * Returns 0 on success or a negative errno.
 */
static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
							dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
697
/* Destroy the device's DMA pool (if any) and free all device resources. */
static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}
706
/*
 * Register a block device as a lightnvm device: identify it, set up core
 * structures and the DMA pool, read the system block (when the device
 * advertises host-side bad block management) and, if a system block was
 * found, attach a media manager.
 *
 * Returns 0 on success or a negative errno.
 */
int nvm_register(struct request_queue *q, char *disk_name,
							struct nvm_dev_ops *ops)
{
	struct nvm_dev *dev;
	int ret;

	if (!ops->identity)
		return -EINVAL;

	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	dev->ops = ops;
	/* NOTE(review): strncpy does not guarantee NUL-termination when
	 * disk_name is exactly DISK_NAME_LEN long — presumably callers pass
	 * shorter names; confirm. */
	strncpy(dev->name, disk_name, DISK_NAME_LEN);

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		/* ret > 0 indicates a valid system block was found */
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	kfree(dev);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
765
/*
 * Unregister the named device: unlink it from the device list under the
 * lock, then tear it down and free it.
 */
void nvm_unregister(char *disk_name)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(disk_name);
	if (!dev) {
		pr_err("nvm: could not find device %s to unregister\n",
								disk_name);
		up_write(&nvm_lock);
		return;
	}

	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
	kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
786
/* Block device ops for target gendisks; all I/O goes via make_request. */
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};
790
/*
 * Create a target on @dev per the ioctl request: allocate a bio-based
 * request queue and gendisk, initialize the target type over the requested
 * LUN range and publish the disk.
 *
 * NOTE(review): nvm_lock is dropped between the duplicate-name check and the
 * final list_add_tail, so two concurrent creates with the same name could
 * presumably both pass the check — verify whether a higher-level lock
 * serializes this path.
 *
 * Returns 0 on success or a negative errno.
 */
static int nvm_create_target(struct nvm_dev *dev,
						struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	void *targetdata;

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	down_write(&nvm_lock);
	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		up_write(&nvm_lock);
		return -EINVAL;
	}

	t = nvm_find_target(create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		up_write(&nvm_lock);
		return -EINVAL;
	}
	up_write(&nvm_lock);

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_t;
	blk_queue_make_request(tqueue, tt->make_rq);

	/* minors are allocated dynamically (GENHD_FL_EXT_DEVT below) */
	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	t->type = tt;
	t->disk = tdisk;
	t->dev = dev;

	down_write(&nvm_lock);
	list_add_tail(&t->list, &nvm_targets);
	up_write(&nvm_lock);

	return 0;
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_t:
	kfree(t);
	return -ENOMEM;
}
871
/*
 * Validate a create request (device exists, simple config, LUN range in
 * bounds) and hand it to nvm_create_target().
 *
 * NOTE(review): the bound check allows lun_end == dev->nr_luns; whether
 * lun_end is inclusive (making that one past the last valid LUN) depends on
 * the target's init() semantics — confirm against the target types.
 */
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return nvm_create_target(dev, create);
}
899
900static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
901{
Simon A. F. Lund6f8645c2016-05-06 20:03:03 +0200902 struct nvm_target *t;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100903
904 down_write(&nvm_lock);
Simon A. F. Lund6f8645c2016-05-06 20:03:03 +0200905 t = nvm_find_target(remove->tgtname);
906 if (!t) {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100907 pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
Simon A. F. Lund6f8645c2016-05-06 20:03:03 +0200908 up_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100909 return -EINVAL;
910 }
911
Simon A. F. Lund6f8645c2016-05-06 20:03:03 +0200912 nvm_remove_target(t);
913 up_write(&nvm_lock);
914
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100915 return 0;
916}
917
918#ifdef CONFIG_NVM_DEBUG
919static int nvm_configure_show(const char *val)
920{
921 struct nvm_dev *dev;
922 char opcode, devname[DISK_NAME_LEN];
923 int ret;
924
925 ret = sscanf(val, "%c %32s", &opcode, devname);
926 if (ret != 2) {
927 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
928 return -EINVAL;
929 }
930
Wenwei Taod0a712c2015-11-28 16:49:28 +0100931 down_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100932 dev = nvm_find_nvm_dev(devname);
Wenwei Taod0a712c2015-11-28 16:49:28 +0100933 up_write(&nvm_lock);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100934 if (!dev) {
935 pr_err("nvm: device not found\n");
936 return -EINVAL;
937 }
938
939 if (!dev->mt)
940 return 0;
941
Javier Gonzalez2fde0e42015-11-20 13:47:57 +0100942 dev->mt->lun_info_print(dev);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +0100943
944 return 0;
945}
946
947static int nvm_configure_remove(const char *val)
948{
949 struct nvm_ioctl_remove remove;
950 char opcode;
951 int ret;
952
953 ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
954 if (ret != 2) {
955 pr_err("nvm: invalid command. Use \"d targetname\".\n");
956 return -EINVAL;
957 }
958
959 remove.flags = 0;
960
961 return __nvm_configure_remove(&remove);
962}
963
964static int nvm_configure_create(const char *val)
965{
966 struct nvm_ioctl_create create;
967 char opcode;
968 int lun_begin, lun_end, ret;
969
970 ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
971 create.tgtname, create.tgttype,
972 &lun_begin, &lun_end);
973 if (ret != 6) {
974 pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
975 return -EINVAL;
976 }
977
978 create.flags = 0;
979 create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
980 create.conf.s.lun_begin = lun_begin;
981 create.conf.s.lun_end = lun_end;
982
983 return __nvm_configure_create(&create);
984}
985
986
987/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
988static int nvm_configure_by_str_event(const char *val,
989 const struct kernel_param *kp)
990{
991 char opcode;
992 int ret;
993
994 ret = sscanf(val, "%c", &opcode);
995 if (ret != 1) {
996 pr_err("nvm: string must have the format of \"cmd ...\"\n");
997 return -EINVAL;
998 }
999
1000 switch (opcode) {
1001 case 'a':
1002 return nvm_configure_create(val);
1003 case 'd':
1004 return nvm_configure_remove(val);
1005 case 's':
1006 return nvm_configure_show(val);
1007 default:
1008 pr_err("nvm: invalid command\n");
1009 return -EINVAL;
1010 }
1011
1012 return 0;
1013}
1014
1015static int nvm_configure_get(char *buf, const struct kernel_param *kp)
1016{
Alan5e422cf2016-02-19 13:56:57 +01001017 int sz;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001018 struct nvm_dev *dev;
1019
Alan5e422cf2016-02-19 13:56:57 +01001020 sz = sprintf(buf, "available devices:\n");
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001021 down_write(&nvm_lock);
1022 list_for_each_entry(dev, &nvm_devices, devices) {
Alan5e422cf2016-02-19 13:56:57 +01001023 if (sz > 4095 - DISK_NAME_LEN - 2)
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001024 break;
Alan5e422cf2016-02-19 13:56:57 +01001025 sz += sprintf(buf + sz, " %32s\n", dev->name);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001026 }
1027 up_write(&nvm_lock);
1028
Alan5e422cf2016-02-19 13:56:57 +01001029 return sz;
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001030}
1031
/* Hook the string-based debug interface up as a module parameter. */
static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set = nvm_configure_by_str_event,
	.get = nvm_configure_get,
};

/* Expose the parameter as "lnvm.configure_debug" regardless of module name. */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);
1042
1043#endif /* CONFIG_NVM_DEBUG */
1044
1045static long nvm_ioctl_info(struct file *file, void __user *arg)
1046{
1047 struct nvm_ioctl_info *info;
1048 struct nvm_tgt_type *tt;
1049 int tgt_iter = 0;
1050
1051 if (!capable(CAP_SYS_ADMIN))
1052 return -EPERM;
1053
1054 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
1055 if (IS_ERR(info))
1056 return -EFAULT;
1057
1058 info->version[0] = NVM_VERSION_MAJOR;
1059 info->version[1] = NVM_VERSION_MINOR;
1060 info->version[2] = NVM_VERSION_PATCH;
1061
1062 down_write(&nvm_lock);
Simon A. F. Lund6063fe32016-05-06 20:03:02 +02001063 list_for_each_entry(tt, &nvm_tgt_types, list) {
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001064 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
1065
1066 tgt->version[0] = tt->version[0];
1067 tgt->version[1] = tt->version[1];
1068 tgt->version[2] = tt->version[2];
1069 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
1070
1071 tgt_iter++;
1072 }
1073
1074 info->tgtsize = tgt_iter;
1075 up_write(&nvm_lock);
1076
Sudip Mukherjee76e25082015-11-28 16:49:24 +01001077 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
1078 kfree(info);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001079 return -EFAULT;
Sudip Mukherjee76e25082015-11-28 16:49:24 +01001080 }
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001081
1082 kfree(info);
1083 return 0;
1084}
1085
1086static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
1087{
1088 struct nvm_ioctl_get_devices *devices;
1089 struct nvm_dev *dev;
1090 int i = 0;
1091
1092 if (!capable(CAP_SYS_ADMIN))
1093 return -EPERM;
1094
1095 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
1096 if (!devices)
1097 return -ENOMEM;
1098
1099 down_write(&nvm_lock);
1100 list_for_each_entry(dev, &nvm_devices, devices) {
1101 struct nvm_ioctl_device_info *info = &devices->info[i];
1102
1103 sprintf(info->devname, "%s", dev->name);
1104 if (dev->mt) {
1105 info->bmversion[0] = dev->mt->version[0];
1106 info->bmversion[1] = dev->mt->version[1];
1107 info->bmversion[2] = dev->mt->version[2];
1108 sprintf(info->bmname, "%s", dev->mt->name);
1109 } else {
1110 sprintf(info->bmname, "none");
1111 }
1112
1113 i++;
1114 if (i > 31) {
1115 pr_err("nvm: max 31 devices can be reported.\n");
1116 break;
1117 }
1118 }
1119 up_write(&nvm_lock);
1120
1121 devices->nr_devices = i;
1122
Sudip Mukherjee76e25082015-11-28 16:49:24 +01001123 if (copy_to_user(arg, devices,
1124 sizeof(struct nvm_ioctl_get_devices))) {
1125 kfree(devices);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001126 return -EFAULT;
Sudip Mukherjee76e25082015-11-28 16:49:24 +01001127 }
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001128
1129 kfree(devices);
1130 return 0;
1131}
1132
1133static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
1134{
1135 struct nvm_ioctl_create create;
1136
1137 if (!capable(CAP_SYS_ADMIN))
1138 return -EPERM;
1139
1140 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1141 return -EFAULT;
1142
1143 create.dev[DISK_NAME_LEN - 1] = '\0';
1144 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1145 create.tgtname[DISK_NAME_LEN - 1] = '\0';
1146
1147 if (create.flags != 0) {
1148 pr_err("nvm: no flags supported\n");
1149 return -EINVAL;
1150 }
1151
1152 return __nvm_configure_create(&create);
1153}
1154
1155static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1156{
1157 struct nvm_ioctl_remove remove;
1158
1159 if (!capable(CAP_SYS_ADMIN))
1160 return -EPERM;
1161
1162 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1163 return -EFAULT;
1164
1165 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1166
1167 if (remove.flags != 0) {
1168 pr_err("nvm: no flags supported\n");
1169 return -EINVAL;
1170 }
1171
1172 return __nvm_configure_remove(&remove);
1173}
1174
Matias Bjørling55696152016-01-12 07:49:37 +01001175static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
1176{
1177 info->seqnr = 1;
1178 info->erase_cnt = 0;
1179 info->version = 1;
1180}
1181
/*
 * Initialize media manager metadata for the named device: build a fresh
 * superblock info structure, persist it via system blocks when the device
 * advertises bad-block management, then attach a media manager.
 * Returns 0 on success, -EINVAL if the device is unknown, or the error
 * from nvm_init_sysblock().
 */
static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	/* nvm_lock guards the global device list during lookup */
	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	/*
	 * NOTE(review): if init->mmtype is not NUL-terminated, info.mmtype
	 * will not be either — assumes the caller terminated it; verify.
	 */
	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1; /* no FTL snapshot recorded yet */

	/* only devices with bad-block management carry system blocks */
	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	/* superblock is in place; attach a media manager under the lock */
	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}
1215
1216static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1217{
1218 struct nvm_ioctl_dev_init init;
1219
1220 if (!capable(CAP_SYS_ADMIN))
1221 return -EPERM;
1222
1223 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1224 return -EFAULT;
1225
1226 if (init.flags != 0) {
1227 pr_err("nvm: no flags supported\n");
1228 return -EINVAL;
1229 }
1230
1231 init.dev[DISK_NAME_LEN - 1] = '\0';
1232
1233 return __nvm_ioctl_dev_init(&init);
1234}
1235
Matias Bjørling8b4970c2016-01-12 07:49:39 +01001236static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1237{
1238 struct nvm_ioctl_dev_factory fact;
1239 struct nvm_dev *dev;
1240
1241 if (!capable(CAP_SYS_ADMIN))
1242 return -EPERM;
1243
1244 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1245 return -EFAULT;
1246
1247 fact.dev[DISK_NAME_LEN - 1] = '\0';
1248
1249 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1250 return -EINVAL;
1251
1252 down_write(&nvm_lock);
1253 dev = nvm_find_nvm_dev(fact.dev);
1254 up_write(&nvm_lock);
1255 if (!dev) {
1256 pr_err("nvm: device not found\n");
1257 return -EINVAL;
1258 }
1259
Matias Bjørling976bdfc2016-05-06 20:03:17 +02001260 nvm_free_mgr(dev);
Matias Bjørling8b4970c2016-01-12 07:49:39 +01001261
Matias Bjørlingbf643182016-02-04 15:13:27 +01001262 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1263 return nvm_dev_factory(dev, fact.flags);
1264
1265 return 0;
Matias Bjørling8b4970c2016-01-12 07:49:39 +01001266}
1267
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001268static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1269{
1270 void __user *argp = (void __user *)arg;
1271
1272 switch (cmd) {
1273 case NVM_INFO:
1274 return nvm_ioctl_info(file, argp);
1275 case NVM_GET_DEVICES:
1276 return nvm_ioctl_get_devices(file, argp);
1277 case NVM_DEV_CREATE:
1278 return nvm_ioctl_dev_create(file, argp);
1279 case NVM_DEV_REMOVE:
1280 return nvm_ioctl_dev_remove(file, argp);
Matias Bjørling55696152016-01-12 07:49:37 +01001281 case NVM_DEV_INIT:
1282 return nvm_ioctl_dev_init(file, argp);
Matias Bjørling8b4970c2016-01-12 07:49:39 +01001283 case NVM_DEV_FACTORY:
1284 return nvm_ioctl_dev_factory(file, argp);
Matias Bjørlingcd9e9802015-10-28 19:54:55 +01001285 }
1286 return 0;
1287}
1288
/* Control-node file operations: ioctl-only, explicitly non-seekable. */
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
1295
/* Misc device exposing the control node as /dev/lightnvm/control. */
static struct miscdevice _nvm_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lightnvm",
	.nodename = "lightnvm/control",
	.fops = &_ctl_fops,
};
1302
1303MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
1304
1305static int __init nvm_mod_init(void)
1306{
1307 int ret;
1308
1309 ret = misc_register(&_nvm_misc);
1310 if (ret)
1311 pr_err("nvm: misc_register failed for control device");
1312
1313 return ret;
1314}
1315
/* Module teardown: unregister the LightNVM control misc device. */
static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}
1320
1321MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
1322MODULE_LICENSE("GPL v2");
1323MODULE_VERSION("0.1");
1324module_init(nvm_mod_init);
1325module_exit(nvm_mod_exit);