/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

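/*
 * Map a loop queue to the blk-mq tag set that owns it: queue 0 is the
 * admin queue and uses the admin tag set, all other queues use the I/O
 * tag set (shifted down by one).
 */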
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

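/*
 * Completion callback invoked by the nvmet core once the target side has
 * finished a command.  Regular commands are matched back to their block
 * layer request by tag; AEN completions are handed straight to the host
 * core because no struct request was ever allocated for them.
 */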
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

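/* Work item that runs the target's ->execute() handler in process context. */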
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

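/*
 * blk-mq ->queue_rq handler: build the NVMe command for the request,
 * initialize the nvmet request against this queue's target SQ/CQ, map the
 * data into a (possibly chained) scatterlist and defer execution to a work
 * item so the target code runs in process context.
 */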
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

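/*
 * Submit the single Asynchronous Event Request command.  It bypasses the
 * block layer and uses a command id at or above NVME_LOOP_AQ_BLKMQ_DEPTH
 * so the completion path can tell it apart from block layer requests.
 */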
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

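/*
 * Per-request init shared by the admin and I/O tag sets: point the iod at
 * the right loop queue (hctx_idx + 1 for the I/O set, 0 for the admin set).
 */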
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

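/*
 * Set up the admin queue: allocate the admin tag set and request queue,
 * issue the fabrics Connect, read CAP, enable the controller and run
 * identify before keep-alive is started.
 */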
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

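/*
 * Tear the controller down: stop keep-alive, quiesce and cancel any
 * outstanding I/O and admin requests, destroy the target queues and, if
 * the controller is still live, perform an orderly NVMe shutdown.
 */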
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!queue_work(nvme_wq, &ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

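/*
 * Controller reset: shut everything down, then bring the admin queue and
 * the I/O queues back up and mark the controller live again.  If any step
 * fails the controller is removed instead.
 */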
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

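/*
 * Create the I/O queues for a new controller: init the target SQs, set up
 * the shared I/O tag set and connect queue, then issue a fabrics Connect
 * on every I/O queue.
 */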
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

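/*
 * ->create_ctrl for the loop transport: allocate and initialize the
 * controller, bring up the admin queue, clamp queue_size to the
 * controller's MAXCMD, optionally create I/O queues and register the
 * controller on the global list before kicking off namespace scanning.
 */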
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.
	 * This scenario would be possible if we allowed more than one
	 * port to be added and a subsystem was assigned to a port other
	 * than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

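/*
 * The loop driver acts as both an nvmet transport (target side) and an
 * nvmf transport (host side), so it registers with both frameworks.
 */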
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */