/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

unsigned int nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, uint, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
EXPORT_SYMBOL_GPL(nvme_max_retries);

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	blk_mq_complete_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

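/*
 * Controller state machine.  Each case below names the target state and
 * enumerates the old states it may legally be entered from; any transition
 * not listed leaves the state unchanged and returns false, which lets
 * callers serialize resets, reconnects and deletion against each other.
 */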
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state = ctrl->state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}
	spin_unlock_irq(&ctrl->lock);

	if (changed)
		ctrl->state = new_state;

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

void nvme_requeue_req(struct request *req)
{
	unsigned long flags;

	blk_mq_requeue_request(req);
	spin_lock_irqsave(req->q->queue_lock, flags);
	if (!blk_queue_stopped(req->q))
		blk_mq_kick_requeue_list(req->q);
	spin_unlock_irqrestore(req->q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(nvme_requeue_req);

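/*
 * Allocate a request carrying an NVMe command.  qid == NVME_QID_ANY lets
 * the block layer pick any hardware context; a specific qid binds the
 * request to that queue's hardware context (qid is 1-based, hctx indices
 * are 0-based, hence the qid - 1 below).  A typical caller sketch:
 *
 *	req = nvme_alloc_request(q, &cmd, 0, NVME_QID_ANY);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	blk_execute_rq(req->q, NULL, req, 0);
 */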
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

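/*
 * A discard request is translated into a Dataset Management command with
 * a single deallocate range.  The range descriptor must stay alive until
 * the command completes, so it is attached to the request as its payload
 * and freed by the driver's completion path via req->completion_data.
 */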
static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_dsm_range *range;
	struct page *page;
	int offset;
	unsigned int nr_bytes = blk_rq_bytes(req);

	range = kmalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->completion_data = range;
	page = virt_to_page(range);
	offset = offset_in_page(range);
	blk_add_request_payload(req, page, offset, sizeof(*range));

	/*
	 * we set __data_len back to the size of the area to be discarded
	 * on disk. This allows us to report completion on the full amount
	 * of blocks described by the request.
	 */
	req->__data_len = nr_bytes;

	return 0;
}

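/*
 * Build a read or write command.  The NVMe length field is zero-based
 * (0 means one logical block).  For namespaces formatted with protection
 * information, the PRCHK bits ask the controller to verify the T10 PI
 * guard/reference tags; if the request carries no integrity payload,
 * PRACT additionally makes the controller generate or strip the tags
 * itself.
 */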
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = 0;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		memcpy(cmd, req->cmd, sizeof(*cmd));
	else if (req_op(req) == REQ_OP_FLUSH)
		nvme_setup_flush(ns, cmd);
	else if (req_op(req) == REQ_OP_DISCARD)
		ret = nvme_setup_discard(ns, req, cmd);
	else
		nvme_setup_rw(ns, req, cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = cqe;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_completion cqe;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	req->special = &cqe;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = le32_to_cpu(cqe.result);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

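/*
 * Fabrics keep-alive: while the controller reports a non-zero KATO (keep
 * alive timeout), a Keep Alive command is sent from a reserved tag every
 * kato seconds.  If sending fails the controller is reset, on the
 * assumption that the association to the target is no longer usable.
 */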
static void nvme_keep_alive_end_io(struct request *rq, int error)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (error) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n", error);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

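/*
 * Identify helpers.  CNS selects the returned data structure: 0 (the
 * zeroed default below) is Identify Namespace, 1 is Identify Controller,
 * and 2 returns a 4KB page of up to 1024 active namespace IDs greater
 * than the given NSID.
 */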
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(2);
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify,
	c.identify.nsid = cpu_to_le32(nsid),

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.dptr.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*result = le32_to_cpu(cqe.result);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page,
	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			 NVME_LOG_SMART),

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

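/*
 * Register a blk-integrity profile matching the namespace's protection
 * information type, so the block layer can generate and verify T10 PI
 * guard/reference tags for requests that carry an integrity payload.
 */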
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
		ns->queue->limits.discard_zeroes_data = 1;
	else
		ns->queue->limits.discard_zeroes_data = 0;

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
				__func__);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(disk_to_dev(ns->disk),
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1))
		memcpy(ns->eui, id->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2))
		memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI implementation requires metadata size to equal the t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}

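/*
 * Persistent reservation support: map the generic Linux pr_type values
 * onto the NVMe reservation type codes, then issue the matching
 * reservation register/acquire/release commands with the keys packed
 * into a 16-byte data buffer.
 */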
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

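/*
 * Poll CSTS.RDY until it matches the expected enable state, giving the
 * controller up to CAP.TO (reported in 500ms units) to make the
 * transition before declaring it dead.
 */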
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	/* Checking for ctrl->tagset is a trick to avoid sleeping on module
	 * load, since we only need the quirk on reset_controller. Notice
	 * that the HGST device needs this delay only in firmware activation
	 * procedure; unfortunately we have no (easy) way to verify this.
	 */
	if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

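/*
 * Propagate controller-wide limits to a request queue: the MDTS-derived
 * maximum transfer size, an upper bound on segments per request, the
 * optional stripe size, the page-sized virt boundary required for PRP
 * data transfers, and the volatile write cache flag.
 */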
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->stripe_size)
		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
		unsigned int max_hw_sectors;

		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
		if (ctrl->max_hw_sectors) {
			ctrl->max_hw_sectors = min(max_hw_sectors,
							ctrl->max_hw_sectors);
		} else {
			ctrl->max_hw_sectors = max_hw_sectors;
		}
	}

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	if (ctrl->ops->is_fabrics) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
			ret = -EINVAL;

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->dev,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
	}

	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

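/*
 * Legacy path for NVME_IOCTL_IO_CMD on the controller character device:
 * it only makes sense (and is only allowed) when the controller exposes
 * exactly one namespace, to which the command is then forwarded.
 */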
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return ctrl->ops->reset_ctrl(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = ctrl->ops->reset_ctrl(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

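/*
 * World-wide ID for the namespace: prefer the 128-bit NGUID (stored in
 * ns->uuid), then the 64-bit EUI-64, and fall back to a tuple of vendor
 * ID, serial, model and namespace ID when neither was reported.
 */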
Keith Busch118472a2016-02-18 09:57:48 -07001422static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
1423 char *buf)
1424{
1425 struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1426 struct nvme_ctrl *ctrl = ns->ctrl;
1427 int serial_len = sizeof(ctrl->serial);
1428 int model_len = sizeof(ctrl->model);
1429
1430 if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1431 return sprintf(buf, "eui.%16phN\n", ns->uuid);
1432
1433 if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1434 return sprintf(buf, "eui.%8phN\n", ns->eui);
1435
1436 while (ctrl->serial[serial_len - 1] == ' ')
1437 serial_len--;
1438 while (ctrl->model[model_len - 1] == ' ')
1439 model_len--;
1440
1441 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
1442 serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
1443}
1444static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
1445
Keith Busch2b9b6e82015-12-22 10:10:45 -07001446static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
1447 char *buf)
1448{
1449 struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1450 return sprintf(buf, "%pU\n", ns->uuid);
1451}
1452static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
1453
1454static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
1455 char *buf)
1456{
1457 struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1458 return sprintf(buf, "%8phd\n", ns->eui);
1459}
1460static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
1461
1462static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
1463 char *buf)
1464{
1465 struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1466 return sprintf(buf, "%d\n", ns->ns_id);
1467}
1468static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
1469
1470static struct attribute *nvme_ns_attrs[] = {
Keith Busch118472a2016-02-18 09:57:48 -07001471 &dev_attr_wwid.attr,
Keith Busch2b9b6e82015-12-22 10:10:45 -07001472 &dev_attr_uuid.attr,
1473 &dev_attr_eui.attr,
1474 &dev_attr_nsid.attr,
1475 NULL,
1476};
1477
Ming Lin1a353d82016-06-13 16:45:24 +02001478static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
Keith Busch2b9b6e82015-12-22 10:10:45 -07001479 struct attribute *a, int n)
1480{
1481 struct device *dev = container_of(kobj, struct device, kobj);
1482 struct nvme_ns *ns = dev_to_disk(dev)->private_data;
1483
1484 if (a == &dev_attr_uuid.attr) {
1485 if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1486 return 0;
1487 }
1488 if (a == &dev_attr_eui.attr) {
1489 if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1490 return 0;
1491 }
1492 return a->mode;
1493}
1494
1495static const struct attribute_group nvme_ns_attr_group = {
1496 .attrs = nvme_ns_attrs,
Ming Lin1a353d82016-06-13 16:45:24 +02001497 .is_visible = nvme_ns_attrs_are_visible,
Keith Busch2b9b6e82015-12-22 10:10:45 -07001498};
1499
Ming Lin931e1c22016-02-26 13:24:19 -08001500#define nvme_show_str_function(field) \
Keith Busch779ff7562016-01-12 15:09:31 -07001501static ssize_t field##_show(struct device *dev, \
1502 struct device_attribute *attr, char *buf) \
1503{ \
1504 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
1505 return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
1506} \
1507static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1508
Ming Lin931e1c22016-02-26 13:24:19 -08001509#define nvme_show_int_function(field) \
1510static ssize_t field##_show(struct device *dev, \
1511 struct device_attribute *attr, char *buf) \
1512{ \
1513 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
1514 return sprintf(buf, "%d\n", ctrl->field); \
1515} \
1516static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1517
1518nvme_show_str_function(model);
1519nvme_show_str_function(serial);
1520nvme_show_str_function(firmware_rev);
1521nvme_show_int_function(cntlid);
Keith Busch779ff7562016-01-12 15:09:31 -07001522
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		ctrl->ops->delete_ctrl(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			ctrl->ops->get_subsysnqn(ctrl));
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	NULL
};

#define CHECK_ATTR(ctrl, a, name)		\
	if ((a) == &dev_attr_##name.attr &&	\
	    !(ctrl)->ops->get_##name)		\
		return 0

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr) {
		if (!ctrl->ops->delete_ctrl)
			return 0;
	}

	CHECK_ATTR(ctrl, a, subsysnqn);
	CHECK_ATTR(ctrl, a, address);

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

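/*
 * list_sort() comparison callback: orders the controller's namespace list
 * by ascending NSID, which lets the lookup below stop searching early.
 */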
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->ns_id - nsb->ns_id;
}

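/*
 * Find a namespace by NSID and take a reference on it.  Relies on the
 * namespace list being kept sorted by NSID to terminate the search early.
 * The caller must drop the reference with nvme_put_ns() when done.
 */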
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid) {
			kref_get(&ns->kref);
			ret = ns;
			break;
		}
		if (ns->ns_id > nsid)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}

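/*
 * Allocate and publish a new namespace: set up the blk-mq request queue and
 * gendisk, validate the namespace via nvme_revalidate_disk(), then add the
 * disk and its sysfs identification attribute group.
 */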
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	if (nvme_revalidate_disk(ns->disk))
		goto out_free_disk;

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);
	if (ns->type == NVME_NS_LIGHTNVM)
		return;

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	return;
 out_free_disk:
	kfree(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
 out_free_ns:
	kfree(ns);
}

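/*
 * Tear down a namespace: the NVME_NS_REMOVING bit guards against concurrent
 * removal, the gendisk and queue are only cleaned up if the disk was ever
 * made live, and the final nvme_put_ns() drops the list's reference.
 */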
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		del_gendisk(ns->disk);
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}

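/*
 * Revalidate one NSID reported by the controller: refresh an existing
 * namespace (removing it if revalidation fails), or allocate a new one.
 */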
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

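/*
 * Prune every namespace whose NSID lies above the highest NSID the
 * controller still reports; used at the end of a scan to drop stale entries.
 */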
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}

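/*
 * Scan namespaces using the Identify Namespace List command.  Each 4KB
 * identify page holds up to 1024 NSIDs in ascending order; gaps between
 * consecutive reported NSIDs identify namespaces that no longer exist,
 * which are removed on the fly.
 */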
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}

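/*
 * Fallback scan for controllers without namespace list support: walk every
 * possible NSID from 1 up to the namespace count reported by Identify
 * Controller.
 */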
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

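/*
 * Workqueue handler that (re)scans the controller's namespaces.  It prefers
 * the namespace list method when the controller reports NVMe 1.1 or later
 * and isn't quirked, then re-sorts the namespace list by NSID.
 */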
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
 done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);

	if (ctrl->ops->post_scan)
		ctrl->ops->post_scan(ctrl);
}

void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		schedule_work(&ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates that the controller was not gracefully
	 * disconnected. In that case we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up a failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

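/*
 * Resubmit Asynchronous Event Request commands.  ctrl->lock protects
 * event_limit; the lock is dropped around each submit_async_event() call so
 * the submission itself runs unlocked.
 */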
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe)
{
	u16 status = le16_to_cpu(cqe->status) >> 1;
	u32 result = le32_to_cpu(cqe->result);

	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
		++ctrl->event_limit;
		schedule_work(&ctrl->async_event_work);
	}

	if (status != NVME_SC_SUCCESS)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	schedule_work(&ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

static DEFINE_IDA(nvme_instance_ida);

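/*
 * Allocate a unique controller instance number (the N in /dev/nvmeN) from
 * the global IDA.  The pre-get/get loop retries while ida_get_new() returns
 * -EAGAIN, i.e. whenever the IDA needs more preallocated memory.
 */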
static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}

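/*
 * Undo nvme_init_ctrl(): flush outstanding async event and scan work, remove
 * all namespaces, and unlink the character device and controller list entry.
 * A transport driver is expected to call this before dropping its final
 * reference with nvme_put_ctrl().
 */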
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	nvme_remove_namespaces(ctrl);

	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues - end all namespace queues
 * @ctrl: the dead controller whose queues need to be ended
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing I/O.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			revalidate_disk(ns->disk);

		blk_set_queue_dying(ns->queue);
		blk_mq_abort_requeue_list(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

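/*
 * Quiesce I/O: mark every namespace queue stopped and halt its hardware
 * queues so no new requests reach the transport, typically ahead of a
 * controller reset.
 */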
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

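/*
 * Counterpart to nvme_stop_queues(): clear the stopped flag, restart the
 * hardware queues, and kick the requeue list so previously requeued
 * requests get dispatched again.
 */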
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int __init nvme_core_init(void)
{
	int result;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

 unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	return result;
}

void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);