/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

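/*
 * Map a loop queue to the blk-mq tagset its requests were allocated from:
 * queue 0 uses the admin tagset, all other queues use the I/O tagset.
 */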
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

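/*
 * Completion path on the target side: the nvmet core calls this once a
 * command has been executed.  Look up the originating host request by
 * command_id and complete it; AEN completions never had a struct request
 * and are handled separately below.
 */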
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

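/* Work handler: execute the command on the target side in process context. */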
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

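/*
 * Request timeout handler: schedule a controller reset and complete the
 * request with an ABORT status that has DNR set so it is not retried.
 */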
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

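/*
 * Host-side .queue_rq handler: build the NVMe command, initialize the
 * corresponding nvmet request, map the data into a chained scatterlist and
 * defer target-side execution to the per-command work item.
 */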
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_MQ_RQ_QUEUE_OK;
}

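/*
 * Submit an Asynchronous Event Request.  AENs are not backed by a struct
 * request, so use the command id reserved above the blk-mq queue depth.
 */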
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

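/*
 * One-time iod setup: point the nvmet request at the embedded command and
 * completion buffers and associate the iod with its queue.
 */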
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
			hctx_idx + 1);
}

static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

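/*
 * Bring up the admin queue: allocate the admin tagset and request queue,
 * issue the Fabrics connect, read CAP, enable the controller and run
 * identify before starting keep-alive.
 */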
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

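/*
 * Full teardown: stop keep-alive, quiesce and cancel outstanding I/O,
 * shut the controller down if it is still live, then destroy the admin
 * queue.
 */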
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

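/*
 * Controller reset: tear everything down, then reconfigure the admin and
 * I/O queues from scratch.  On failure the controller is removed.
 */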
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

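/*
 * I/O queue bring-up: initialize the target submission queues, allocate
 * the I/O tagset and connect_q, then issue a Fabrics connect on each
 * I/O queue.
 */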
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

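/*
 * create_ctrl entry point called from the fabrics layer: allocate a loop
 * controller, configure the admin queue and optionally the I/O queues,
 * then register the controller and kick off namespace scanning.
 */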
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so that there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.  This scenario would be
	 * possible if we allowed more than one port to be added and a
	 * subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */