/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/t10-pi.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

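/*
 * Per-request context.  Each request carries both the host-side NVMe
 * command/completion pair and the target-side nvmet_req, so a command
 * submitted by the host stack can be handed straight to the nvmet core
 * and completed in place; the chained scatterlist maps the request's
 * pages so the target backend operates on the host pages directly.
 */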
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_iod *iod =
		container_of(req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq = blk_mq_rq_from_pdu(iod);

		iod->nvme_req.result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

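/*
 * I/O submission path: translate the block request into an NVMe command,
 * initialise the target-side nvmet_req against the loop port, map the
 * request's data into the chained SGL, and hand execution of the command
 * off to a work item (nvme_loop_execute_work).
 */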
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return 0;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			req->nr_phys_segments, iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
	}

	iod->cmd.common.command_id = req->tag;
	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return 0;
}

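/*
 * AEN commands bypass the block layer entirely: the controller keeps one
 * pre-allocated iod for them and tags the command with an ID beyond the
 * blk-mq tag space (NVME_LOOP_AQ_BLKMQ_DEPTH) so the completion path can
 * tell them apart from regular admin commands.
 */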
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	BUG_ON(queue_idx >= ctrl->queue_count);

	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

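/*
 * Admin queue bring-up: allocate the admin tag set and request queue,
 * initialise the target-side submission queue for queue 0, then use the
 * regular fabrics helpers to issue Connect, read CAP, enable the
 * controller, run Identify and start keep-alive, just as a "real"
 * fabrics transport would.
 */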
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

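/*
 * Tear-down path shared by delete and reset: stop keep-alive, cancel all
 * outstanding I/O and admin requests, destroy the target-side submission
 * queues and, if the controller is still live, issue a proper shutdown
 * before dismantling the admin queue.
 */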
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	int i;

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);

		for (i = 1; i < ctrl->queue_count; i++)
			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

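/*
 * Controller reset: shut everything down, rebuild the admin queue, then
 * re-create and re-connect every I/O queue before marking the controller
 * live again.  If any step fails the controller is removed rather than
 * left half-initialised.
 */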
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int i, ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_free_queues;

		ctrl->queue_count++;
	}

	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_free_queues;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_free_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

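/*
 * I/O queue setup: negotiate the queue count with the target, create a
 * target-side submission queue per I/O queue, allocate the shared tag
 * set and connect queue, then issue a fabrics Connect on each queue.
 */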
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret, i;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret || !opts->nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
		opts->nr_io_queues);

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	for (i = 1; i <= opts->nr_io_queues; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			goto out_cleanup_connect_q;
	}

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	return ret;
}

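/*
 * "connect" entry point for the loop transport: allocate and register a
 * controller, bring up the admin queue, clamp queue_size to the
 * controller's MAXCMD, create the I/O queues if any were requested and
 * finally mark the controller live and kick off namespace scanning.
 */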
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so that there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.  This scenario would be
	 * possible if we allowed more than one port to be added and a
	 * subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;
	nvmf_register_transport(&nvme_loop_transport);
	return 0;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */