/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

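/*
 * Per-request context shared by the host and target halves of the loop:
 * it carries the host NVMe command, the target's completion, the nvmet
 * request used to execute it, and an inline scatterlist so data is
 * handed over by reference rather than copied.
 */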
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

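/*
 * Target-side completion path: the nvmet core calls this when a response
 * is ready.  AEN completions are handed straight to the NVMe core; all
 * other completions are matched back to their block layer request by
 * command id (the blk-mq tag) and completed there.
 */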
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

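/*
 * Host-side ->queue_rq: build the NVMe command for the request, initialize
 * the corresponding nvmet request, map the data into a chained scatterlist
 * shared with the target, and defer execution to a work item.  The target
 * answers through nvme_loop_queue_response() above.
 */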
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

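/*
 * Bring up the admin queue: initialize the target submission queue, set up
 * the admin tag set and request queue, issue the fabrics Connect, then read
 * CAP, enable the controller and identify it.
 */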
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

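/*
 * Tear down in the reverse order of setup: cancel outstanding I/O and
 * destroy the I/O queues first, shut the controller down if it is still
 * live, then drain and destroy the admin queue.
 */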
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

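/*
 * Controller reset: stop the controller, tear everything down, and rebuild
 * the admin and I/O queues from scratch.  If any step fails the controller
 * is removed rather than left half-initialized.
 */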
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

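/*
 * create_ctrl entry point for the "loop" transport: allocate the controller,
 * bring up the admin queue, clamp queue_size to the controller's reported
 * maxcmd, create the I/O queues if any were requested, and mark the
 * controller live.
 */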
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no connection
	 * rejections when a subsystem is assigned to a port for which loop
	 * doesn't have a pointer.  This scenario would be possible if we
	 * allowed more than one port to be added and a subsystem was assigned
	 * to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */