/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

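/* LightNVM admin commands sit in the NVMe vendor-specific opcode range. */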
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

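/*
 * Hybrid read/write: addresses physical sectors through spba, but also
 * carries the logical slba so the device can keep its logical-to-physical
 * mapping in step with the host's placement.
 */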
struct nvme_nvm_hb_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			slba;
};

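/* Physical read/write: same layout as the hybrid command, with the slba slot reserved. */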
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le32			chnl_off;
	__u32			rsvd11[5];
};

struct nvme_nvm_l2ptbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[4];
	__le64			prp1;
	__le64			prp2;
	__le64			slba;
	__le32			nlb;
	__le16			cdw14[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

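/*
 * Mirrors the 16-byte NVMe completion queue entry, with the result field
 * widened to 64 bits so it can carry per-sector ppa completion bits.
 */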
struct nvme_nvm_completion {
	__le64	result;		/* Used by LightNVM to return ppa completions */
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};

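/*
 * 886 pairing bytes make struct nvme_nvm_id_group exactly 960 bytes,
 * as asserted in _nvme_nvm_check_size() below.
 */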
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__u16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_blk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[10];
	struct nvme_nvm_lp_tbl lptbl;
} __packed;

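/*
 * Device-reported ppa address format: each (offset, len) pair locates
 * one component inside the 64-bit ppa, e.g.
 *
 *	ch = (ppa >> ch_offset) & ((1 << ch_len) - 1)
 */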
struct nvme_nvm_addr_format {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sect_offset;
	__u8			sect_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_addr_format ppaf;
	__u8			resv[228];
	struct nvme_nvm_id_group groups[4];
} __packed;

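/*
 * Get bad block table response: fixed header followed by one state byte
 * per block (blk[] is a flexible array sized by the caller).
 */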
struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

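/*
 * Unpack up to four identify geometry groups from their little-endian
 * wire format into the host-native nvm_id. For MLC media, the
 * page-pairing table is copied along as well.
 */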
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;
	int i, end;

	end = min_t(u32, 4, nvm_id->cgrps);

	for (i = 0; i < end; i++) {
		src = &nvme_nvm_id->groups[i];
		dst = &nvm_id->groups[i];

		dst->mtype = src->mtype;
		dst->fmtype = src->fmtype;
		dst->num_ch = src->num_ch;
		dst->num_lun = src->num_lun;
		dst->num_pln = src->num_pln;

		dst->num_pg = le16_to_cpu(src->num_pg);
		dst->num_blk = le16_to_cpu(src->num_blk);
		dst->fpg_sz = le16_to_cpu(src->fpg_sz);
		dst->csecs = le16_to_cpu(src->csecs);
		dst->sos = le16_to_cpu(src->sos);

		dst->trdt = le32_to_cpu(src->trdt);
		dst->trdm = le32_to_cpu(src->trdm);
		dst->tprt = le32_to_cpu(src->tprt);
		dst->tprm = le32_to_cpu(src->tprm);
		dst->tbet = le32_to_cpu(src->tbet);
		dst->tbem = le32_to_cpu(src->tbem);
		dst->mpos = le32_to_cpu(src->mpos);
		dst->mccap = le32_to_cpu(src->mccap);

		dst->cpar = le16_to_cpu(src->cpar);

		if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
			memcpy(dst->lptbl.id, src->lptbl.id, 8);
			dst->lptbl.mlc.num_pairs =
					le16_to_cpu(src->lptbl.mlc.num_pairs);

			if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
				pr_err("nvm: number of MLC pairs not supported\n");
				return -EINVAL;
			}

			memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
						dst->lptbl.mlc.num_pairs);
		}
	}

	return 0;
}

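/*
 * Issue the vendor-specific identify command and translate the returned
 * geometry into the generic nvm_id consumed by the LightNVM core.
 */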
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cgrps = nvme_nvm_id->cgrps;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvme_nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}

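/*
 * Fetch the logical-to-physical table in chunks bounded by the admin
 * queue's maximum transfer size, handing each chunk to update_l2p.
 */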
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
									ret);
			ret = -EIO;
			goto out;
		}

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}

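/*
 * Fold the per-plane block states reported by the device into a single
 * state per generic block; a block that is bad on any plane marks the
 * whole generic block bad.
 */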
static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
				int nr_dst_blks, u8 *dst_blks,
				int nr_src_blks, u8 *src_blks)
{
	int blk, offset, pl, blktype;

	for (blk = 0; blk < nr_dst_blks; blk++) {
		offset = blk * nvmdev->plane_mode;
		blktype = src_blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < nvmdev->plane_mode; pl++) {
			if (src_blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = src_blks[offset + pl];
				break;
			}
		}

		dst_blks[blk] = blktype;
	}
}

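/*
 * Retrieve and validate the bad block table for the LUN addressed by
 * ppa, fold it down to one state per generic block and pass the result
 * to update_bbtbl.
 */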
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
				int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
				void *priv)
{
	struct request_queue *q = nvmdev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	u8 *dst_blks = NULL;
	int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
	if (!dst_blks) {
		ret = -ENOMEM;
		goto out;
	}

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->dev, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->dev, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
		ret = -EINVAL;
		dev_err(ctrl->dev, "bbt unexpected blocks returned (%u!=%u)",
					le32_to_cpu(bb_tbl->tblks), nr_src_blks);
		goto out;
	}

	nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
					nr_src_blks, bb_tbl->blk);

	ppa = dev_to_generic_addr(nvmdev, ppa);
	ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);

out:
	kfree(dst_blks);
	kfree(bb_tbl);
	return ret;
}

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
								int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
	return ret;
}

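/*
 * Translate a generic nvm_rq into an NVMe LightNVM command. Hybrid
 * reads and writes additionally carry the logical slba taken from the
 * bio's starting sector.
 */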
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
				struct nvme_ns *ns, struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
						rqd->bio->bi_iter.bi_sector));
}

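/*
 * Completion path: if the lower driver stashed the raw completion entry
 * in rq->special, forward the ppa completion bits before handing the
 * request back to the LightNVM core.
 */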
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvme_nvm_completion *cqe = rq->special;

	if (cqe)
		rqd->ppa_status = le64_to_cpu(cqe->result);

	nvm_end_io(rqd, error);

	kfree(rq->cmd);
	blk_mq_free_request(rq);
}

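/*
 * Submit an asynchronous LightNVM I/O: the bio is wrapped in a
 * driver-private request whose command buffer is followed by room for
 * the raw completion entry picked up in nvme_nvm_end_io().
 */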
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = cmd + 1;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}

static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_nvm_command c = {};

	c.erase.opcode = NVM_OP_ERASE;
	c.erase.nsid = cpu_to_le32(ns->ns_id);
	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
				  dma_addr_t dma_handler)
{
	dma_pool_free(pool, ppa_list, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,
	.erase_block		= nvme_nvm_erase_block,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};

int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
	nvm_unregister(disk_name);
}

/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* XXX: this is poking into PCI structures from generic code! */
	struct pci_dev *pdev = to_pci_dev(ctrl->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}