/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH              256

#define NVME_LOOP_MAX_SEGMENTS          256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS       1
#define NVME_LOOP_AQ_BLKMQ_DEPTH        \
        (NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

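/*
 * Per-command context, carved out of the blk-mq request PDU.  Because the
 * loop transport keeps host and target in the same address space, the
 * host-side command/completion and the target-side nvmet_req share this one
 * allocation; first_sgl provides the inline scatterlist space reserved via
 * cmd_size in the tag sets below.
 */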
struct nvme_loop_iod {
        struct nvme_request     nvme_req;
        struct nvme_command     cmd;
        struct nvme_completion  rsp;
        struct nvmet_req        req;
        struct nvme_loop_queue  *queue;
        struct work_struct      work;
        struct sg_table         sg_table;
        struct scatterlist      first_sgl[];
};

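/*
 * Per-controller state.  queues[0] is the admin queue and queues[1..n] are
 * the I/O queues; ctrl embeds the generic NVMe host controller state.
 */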
struct nvme_loop_ctrl {
        struct nvme_loop_queue  *queues;
        u32                     queue_count;

        struct blk_mq_tag_set   admin_tag_set;

        struct list_head        list;
        u64                     cap;
        struct blk_mq_tag_set   tag_set;
        struct nvme_loop_iod    async_event_iod;
        struct nvme_ctrl        ctrl;

        struct nvmet_ctrl       *target_ctrl;
        struct work_struct      delete_work;
        struct work_struct      reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

        nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, true);
        nvme_complete_rq(req);
}

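/*
 * Return the blk-mq tag set backing this queue (the admin tag set for queue
 * 0, otherwise the I/O tag set) so that completions can be matched back to
 * their request by command_id.
 */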
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
        u32 queue_idx = nvme_loop_queue_idx(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
        struct nvme_loop_queue *queue =
                container_of(req->sq, struct nvme_loop_queue, nvme_sq);
        struct nvme_completion *cqe = req->rsp;

        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
        if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {
                struct request *rq;

                rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
                                "tag 0x%x on queue %d not found\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }

                nvme_end_request(rq, cqe->status, cqe->result);
        }
}

static void nvme_loop_execute_work(struct work_struct *work)
{
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);

        iod->req.execute(&iod->req);
}

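/*
 * Request timeout: kick off controller reset for error recovery and complete
 * the request with an abort status marked DNR (do not retry).
 */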
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

        /* queue error recovery */
        queue_work(nvme_wq, &iod->queue->ctrl->reset_work);

        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

        return BLK_EH_HANDLED;
}

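/*
 * I/O path: translate the blk-mq request into an NVMe command, hand it to
 * the nvmet core via nvmet_req_init(), and map the request's pages straight
 * into the target-side SG list, so no data is copied across the "fabric".
 * Actual execution happens from a work item (nvme_loop_execute_work).
 */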
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;

        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;

        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = nvmet_loop_port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvme_loop_ops)) {
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
                return BLK_STS_OK;
        }

        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
                                iod->sg_table.sgl))
                        return BLK_STS_RESOURCE;

                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
        }

        blk_mq_start_request(req);

        schedule_work(&iod->work);
        return BLK_STS_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];
        struct nvme_loop_iod *iod = &ctrl->async_event_iod;

        memset(&iod->cmd, 0, sizeof(iod->cmd));
        iod->cmd.common.opcode = nvme_admin_async_event;
        iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

        if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
                        &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }

        schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
{
        iod->req.cmd = &iod->cmd;
        iod->req.rsp = &iod->rsp;
        iod->queue = &ctrl->queues[queue_idx];
        INIT_WORK(&iod->work, nvme_loop_execute_work);
        return 0;
}

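/*
 * blk-mq allocates one nvme_loop_iod as part of each request PDU.  Hardware
 * context index i of the I/O tag set maps to queues[i + 1]; the admin tag
 * set always uses queues[0].
 */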
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                struct request *req, unsigned int hctx_idx,
                unsigned int numa_node)
{
        struct nvme_loop_ctrl *ctrl = set->driver_data;

        return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
                        (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

        BUG_ON(hctx_idx >= ctrl->queue_count);

        hctx->driver_data = queue;
        return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[0];

        BUG_ON(hctx_idx != 0);

        hctx->driver_data = queue;
        return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_hctx,
        .timeout        = nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (nctrl->tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
        }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret, i;

        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret || !nr_io_queues)
                return ret;

        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
                if (ret)
                        goto out_destroy_queues;

                ctrl->queue_count++;
        }

        return 0;

out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
        }

        return 0;
}

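/*
 * Admin queue bring-up: initialize the target-side submission queue, set up
 * the admin tag set and request queue, issue the fabrics Connect, read CAP,
 * enable the controller, run Identify and start keep-alive.  The error
 * labels unwind in the reverse order.
 */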
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        int error;

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

        ctrl->queues[0].ctrl = ctrl;
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
        if (error)
                return error;
        ctrl->queue_count = 1;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_free_sq;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_tagset;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_cleanup_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        nvme_start_keep_alive(&ctrl->ctrl);

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        return error;
}

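/*
 * Teardown: stop keep-alive, quiesce and cancel outstanding I/O, destroy the
 * I/O queues, shut the controller down if it is still live, then tear down
 * the admin queue.
 */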
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
        nvme_stop_keep_alive(&ctrl->ctrl);

        if (ctrl->queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_loop_destroy_io_queues(ctrl);
        }

        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);

        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;

        if (!queue_work(nvme_wq, &ctrl->delete_work))
                return -EBUSY;

        return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
        int ret;

        ret = __nvme_loop_del_ctrl(ctrl);
        if (ret)
                return ret;

        flush_work(&ctrl->delete_work);

        return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
                if (ctrl->ctrl.cntlid == nctrl->cntlid)
                        __nvme_loop_del_ctrl(ctrl);
        }
        mutex_unlock(&nvme_loop_ctrl_mutex);
}

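/*
 * Controller reset is implemented as a full teardown followed by a rebuild
 * of the admin and I/O queues; if any step fails the controller is removed.
 */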
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                        struct nvme_loop_ctrl, reset_work);
        bool changed;
        int ret;

        nvme_loop_shutdown_ctrl(ctrl);

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                goto out_destroy_admin;

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_destroy_io;

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        nvme_queue_scan(&ctrl->ctrl);
        nvme_queue_async_events(&ctrl->ctrl);

        nvme_start_queues(&ctrl->ctrl);

        return;

out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                return -EBUSY;

        if (!queue_work(nvme_wq, &ctrl->reset_work))
                return -EBUSY;

        flush_work(&ctrl->reset_work);

        return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .reset_ctrl             = nvme_loop_reset_ctrl,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_del_ctrl,
        .get_subsysnqn          = nvmf_get_subsysnqn,
};

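/*
 * Create the I/O side: target submission queues, the I/O tag set, the
 * connect_q used for the fabrics Connect commands, and finally connect each
 * I/O queue.
 */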
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int ret;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                return ret;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_destroy_queues;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tagset;
        }

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

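/*
 * Called by the fabrics layer when a "loop" controller is requested, e.g.
 * from nvme-cli (roughly "nvme connect -t loop -n <subsysnqn>") against a
 * subsystem previously configured through the nvmet configfs interface.
 * The user-space steps above are illustrative only, not part of this driver.
 */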
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_loop_ctrl *ctrl;
        bool changed;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
        INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_put_ctrl;

        ret = -ENOMEM;

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
                        GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_free_queues;

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->nr_io_queues) {
                ret = nvme_loop_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

        dev_info(ctrl->ctrl.device,
                 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

        kref_get(&ctrl->ctrl.kref);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (opts->nr_io_queues) {
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
        /*
         * XXX: disallow adding more than one port so
         * there are no connection rejections when a
         * subsystem is assigned to a port for which
         * loop doesn't have a pointer.
         * This scenario would be possible if we allowed
         * more than one port to be added and a subsystem
         * was assigned to a port other than nvmet_loop_port.
         */

        if (nvmet_loop_port)
                return -EPERM;

        nvmet_loop_port = port;
        return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
        if (port == nvmet_loop_port)
                nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
        .delete_ctrl    = nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .create_ctrl    = nvme_loop_create_ctrl,
};

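/*
 * The loop driver registers on both sides: as an nvmet fabrics backend
 * (nvme_loop_ops) and as an nvme-fabrics host transport
 * (nvme_loop_transport), which is what allows host and target to be looped
 * back within a single kernel.
 */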
static int __init nvme_loop_init_module(void)
{
        int ret;

        ret = nvmet_register_transport(&nvme_loop_ops);
        if (ret)
                return ret;

        ret = nvmf_register_transport(&nvme_loop_transport);
        if (ret)
                nvmet_unregister_transport(&nvme_loop_ops);

        return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
        struct nvme_loop_ctrl *ctrl, *next;

        nvmf_unregister_transport(&nvme_loop_transport);
        nvmet_unregister_transport(&nvme_loop_ops);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
                __nvme_loop_del_ctrl(ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */