/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#ifdef CONFIG_NVM

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

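/*
 * LightNVM rides on NVMe: device management uses the vendor-specific
 * admin opcodes below, while I/O uses the vendor-specific NVM opcodes
 * (NVM_OP_*) defined in <linux/lightnvm.h>.
 */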
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

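/*
 * Hybrid read/write: the command targets a physical address (spba) but
 * also carries the matching logical address (slba), letting the device
 * keep its own logical-to-physical mapping up to date.
 */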
struct nvme_nvm_hb_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			slba;
};

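/* Pure physical read/write: the media is addressed by PPA only */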
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;
	__u32			rsvd11[5];
};

struct nvme_nvm_l2ptbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[4];
	__le64			prp1;
	__le64			prp2;
	__le64			slba;
	__le32			nlb;
	__le16			cdw14[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

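/*
 * Every LightNVM command fits the standard 64-byte NVMe command slot
 * (see _nvme_nvm_check_size() below), so the submission paths can cast
 * to struct nvme_command.
 */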
struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

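/*
 * Per-group configuration from the identity payload: media geometry
 * (channels, LUNs, planes, blocks, pages, sector sizes) plus typical/max
 * timings for read (trdt/trdm), program (tprt/tprm) and erase
 * (tbet/tbem).
 */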
struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_blk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[906];
} __packed;

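/*
 * Device-reported physical page address (PPA) format: the bit offset
 * and length of each field (channel, LUN, plane, block, page, sector)
 * within the 64-bit address.
 */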
struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res[5];
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			ppat;
	__u8			resv[223];
	struct nvme_nvm_id_group groups[4];
} __packed;

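/*
 * Bad block table payload: a "BBLT" header with per-category counts
 * (total, factory, grown, device/host reserved) followed by one state
 * byte per block in blk[].
 */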
struct nvme_nvm_bb_tbl {
	__u8			tblid[4];
	__le16			verid;
	__le16			revid;
	__le32			rvsd1;
	__le32			tblks;
	__le32			tfact;
	__le32			tgrown;
	__le32			tdresv;
	__le32			thresv;
	__le32			rsvd2[8];
	__u8			blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}

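/*
 * Unpack the little-endian identity groups into the host-native
 * nvm_id_group representation. At most four groups are defined.
 */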
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);
	}

	return 0;
}

static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
						sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}

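/*
 * Read the device-maintained logical-to-physical table. The table is
 * fetched in chunks bounded by the queue's maximum transfer size, and
 * each chunk of 64-bit entries is handed to the update_l2p callback.
 */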
static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
								entries, len);
		if (ret) {
			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
									ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}

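/*
 * Fetch the bad block table for the LUN addressed by @ppa, validate its
 * "BBLT" signature, version and block count, and pass the per-block
 * states on to the update_bbtbl callback.
 */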
static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_tbl, tblsz);
	if (ret) {
		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(dev->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
		ret = -EINVAL;
		dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)",
					le32_to_cpu(bb_tbl->tblks), nr_blocks);
		goto out;
	}

	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
	if (ret) {
		ret = -EINTR;
		goto out;
	}

out:
	kfree(bb_tbl);
	return ret;
}

static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
	if (ret)
		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
	return ret;
}

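/*
 * Translate a LightNVM request into an NVMe command. Hybrid commands
 * additionally carry the logical address derived from the bio's start
 * sector.
 */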
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
						rqd->bio->bi_iter.bi_sector));
}

static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	if (dev->mt->end_io(rqd, error))
		pr_err("nvme: err status: %x result: %lx\n",
				rq->errors, (unsigned long)rq->special);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}

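/*
 * Submit a LightNVM I/O as an asynchronous driver-private request. The
 * command buffer and request are freed on completion in
 * nvme_nvm_end_io().
 */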
static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = (void *)0;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}

static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_dev *dev = ns->dev;

	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}

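/*
 * Ops vector handed to the LightNVM core at registration time;
 * max_phys_sect caps a single vectored command at 64 physical pages.
 */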
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};

int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}

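/*
 * There is no standardised way to detect an open-channel device yet, so
 * namespaces are whitelisted by PCI ID plus a vendor-specific identify
 * bit.
 */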
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_dev *dev = ns->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}
#else
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}
void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */