/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

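/*
 * Per-command context: pairs the host-side command/completion with the
 * target-side nvmet_req so a command can be executed in place without a
 * transport. first_sgl[] is the inline scatterlist storage reserved via
 * the tag set's cmd_size.
 */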
struct nvme_loop_iod {
        struct nvme_request     nvme_req;
        struct nvme_command     cmd;
        struct nvme_completion  rsp;
        struct nvmet_req        req;
        struct nvme_loop_queue  *queue;
        struct work_struct      work;
        struct sg_table         sg_table;
        struct scatterlist      first_sgl[];
};

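/*
 * One loopback controller: the host-side nvme_ctrl plus the blk-mq tag
 * sets for its admin and I/O queues, tracked on nvme_loop_ctrl_list.
 */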
struct nvme_loop_ctrl {
        struct nvme_loop_queue  *queues;

        struct blk_mq_tag_set   admin_tag_set;

        struct list_head        list;
        struct blk_mq_tag_set   tag_set;
        struct nvme_loop_iod    async_event_iod;
        struct nvme_ctrl        ctrl;

        struct nvmet_ctrl       *target_ctrl;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

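/*
 * A loopback queue is simply a target submission/completion queue pair
 * plus a backpointer to the owning controller.
 */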
struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

        nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, true);
        nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
        u32 queue_idx = nvme_loop_queue_idx(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

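/*
 * nvmet ->queue_response callback: instead of sending a completion over
 * a wire, look up the originating host request by command id and
 * complete it directly.
 */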
static void nvme_loop_queue_response(struct nvmet_req *req)
{
        struct nvme_loop_queue *queue =
                container_of(req->sq, struct nvme_loop_queue, nvme_sq);
        struct nvme_completion *cqe = req->rsp;

        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
         * aborts. We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
        if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {
                struct request *rq;

                rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
                                "tag 0x%x on queue %d not found\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }

                nvme_end_request(rq, cqe->status, cqe->result);
        }
}

static void nvme_loop_execute_work(struct work_struct *work)
{
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);

        iod->req.execute(&iod->req);
}

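/*
 * Block layer timeout handler: schedule controller reset for error
 * recovery and fail the timed-out request with an aborted/DNR status.
 */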
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

        /* queue error recovery */
        nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

        return BLK_EH_HANDLED;
}

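/*
 * blk-mq ->queue_rq: translate the host request into an nvmet_req, map
 * its data into a chained scatterlist, and hand it to the target core
 * via the iod work item (executed in nvme_loop_execute_work).
 */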
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret;

        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;

        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = nvmet_loop_port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvme_loop_ops)) {
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
                return BLK_STS_OK;
        }

        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
                                iod->sg_table.sgl))
                        return BLK_STS_RESOURCE;

                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
        }

        blk_mq_start_request(req);

        schedule_work(&iod->work);
        return BLK_STS_OK;
}

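/*
 * AEN commands bypass blk-mq entirely: a dedicated iod is built by hand
 * and tagged with the reserved command id NVME_AQ_BLK_MQ_DEPTH, which
 * nvme_loop_queue_response() recognizes as an async event completion.
 */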
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];
        struct nvme_loop_iod *iod = &ctrl->async_event_iod;

        memset(&iod->cmd, 0, sizeof(iod->cmd));
        iod->cmd.common.opcode = nvme_admin_async_event;
        iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

        if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
                        &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }

        schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
{
        iod->req.cmd = &iod->cmd;
        iod->req.rsp = &iod->rsp;
        iod->queue = &ctrl->queues[queue_idx];
        INIT_WORK(&iod->work, nvme_loop_execute_work);
        return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                struct request *req, unsigned int hctx_idx,
                unsigned int numa_node)
{
        struct nvme_loop_ctrl *ctrl = set->driver_data;

        return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
                        (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

        BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

        hctx->driver_data = queue;
        return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[0];

        BUG_ON(hctx_idx != 0);

        hctx->driver_data = queue;
        return 0;
}

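/*
 * Admin and I/O queues share the same queue_rq/complete/timeout path;
 * only the hctx initialization differs.
 */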
static const struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_hctx,
        .timeout        = nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (nctrl->tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
        }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->ctrl.queue_count; i++)
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret, i;

        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret || !nr_io_queues)
                return ret;

        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
                if (ret)
                        goto out_destroy_queues;

                ctrl->ctrl.queue_count++;
        }

        return 0;

out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
        }

        return 0;
}

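/*
 * Admin queue bring-up: target SQ, admin tag set and request queue,
 * fabrics Connect, then controller enable and identification.
 */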
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        int error;

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
        ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

        ctrl->queues[0].ctrl = ctrl;
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
        if (error)
                return error;
        ctrl->ctrl.queue_count = 1;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_free_sq;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_tagset;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_cleanup_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        return error;
}

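/*
 * Teardown: stop and drain the I/O queues first, shut the controller
 * down if it is still live, then cancel anything outstanding on the
 * admin queue and destroy it.
 */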
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_loop_destroy_io_queues(ctrl);
        }

        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
        nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
                if (ctrl->ctrl.cntlid == nctrl->cntlid)
                        nvme_delete_ctrl(&ctrl->ctrl);
        }
        mutex_unlock(&nvme_loop_ctrl_mutex);
}

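/*
 * Controller reset: tear everything down, rebuild the admin and I/O
 * queues, reconnect them, and return the controller to the LIVE state.
 */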
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl =
                container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
        bool changed;
        int ret;

        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                goto out_destroy_admin;

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_destroy_io;

        blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        nvme_start_ctrl(&ctrl->ctrl);

        return;

out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_delete_ctrl_host,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int ret;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                return ret;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_destroy_queues;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tagset;
        }

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

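/*
 * Entry point from the fabrics layer when a "loop" transport connect is
 * requested (typically via nvme-cli, e.g. "nvme connect -t loop").
 */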
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_loop_ctrl *ctrl;
        bool changed;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_put_ctrl;

        ret = -ENOMEM;

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
                        GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_free_queues;

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->nr_io_queues) {
                ret = nvme_loop_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

        dev_info(ctrl->ctrl.device,
                 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

        nvme_get_ctrl(&ctrl->ctrl);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        nvme_start_ctrl(&ctrl->ctrl);

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
        /*
         * XXX: disallow adding more than one port so that there are no
         * connection rejections when a subsystem is assigned to a port
         * for which loop doesn't have a pointer.
         * This scenario would be possible if we allowed more than one
         * port to be added and a subsystem was assigned to a port other
         * than nvmet_loop_port.
         */

        if (nvmet_loop_port)
                return -EPERM;

        nvmet_loop_port = port;
        return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
        if (port == nvmet_loop_port)
                nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
        .delete_ctrl    = nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .create_ctrl    = nvme_loop_create_ctrl,
};

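/*
 * Register with the target core first so nvme_loop_ops is available
 * before the host-side "loop" transport can create controllers.
 */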
static int __init nvme_loop_init_module(void)
{
        int ret;

        ret = nvmet_register_transport(&nvme_loop_ops);
        if (ret)
                return ret;

        ret = nvmf_register_transport(&nvme_loop_transport);
        if (ret)
                nvmet_unregister_transport(&nvme_loop_ops);

        return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
        struct nvme_loop_ctrl *ctrl, *next;

        nvmf_unregister_transport(&nvme_loop_transport);
        nvmet_unregister_transport(&nvme_loop_ops);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
                nvme_delete_ctrl(&ctrl->ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */