/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

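/*
 * Per-command context for the loopback transport.  One allocation carries
 * both sides of the "wire": the host-side nvme_request/nvme_command and
 * the target-side nvmet_req all live in the same iod, so handing a
 * command to the target and a completion back to the host never copies
 * command or completion data.  first_sgl[] provides the inline first
 * chunk of the chained scatterlist used for the data payload.
 */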
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

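/*
 * Queue 0 is the admin queue and owns the admin tagset; I/O queue N uses
 * entry N - 1 of the I/O tagset.  This lookup lets the completion path
 * map a CQE command_id back to its struct request.
 */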
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

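/*
 * Target-side completion callback (nvmet_fabrics_ops.queue_response).
 * On a real fabric the CQE would go out on the wire; here it is turned
 * directly into a host-side request completion.
 */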
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

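/*
 * Command execution is deferred to a work item so the target core runs
 * in process context, where the backend block or file I/O may sleep.
 */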
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

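/*
 * A timed-out request schedules controller reset for recovery and is
 * completed with ABORT_REQ | DNR so the host will not retry it.
 */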
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

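/*
 * Submission path (blk_mq_ops.queue_rq): check that the queue is live,
 * build the NVMe command, initialize the target-side request against
 * this queue's nvmet SQ/CQ pair, map the payload into a chained
 * scatterlist shared by both sides, then kick the work item above.
 * When nvmet_req_init() fails it has already queued an error response,
 * so returning BLK_STS_OK is still correct there.
 */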
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
		test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
	if (unlikely(ret))
		return ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_payload_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

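/*
 * AER commands never get a struct request; the reserved command_id
 * NVME_AQ_BLK_MQ_DEPTH sits just above the admin tagset depth, which is
 * exactly what the completion path checks for to spot them.
 */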
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

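/*
 * ctrl.queue_count includes the admin queue, so I/O queues occupy
 * indices 1..queue_count - 1.  Each gets a target-side SQ here; the
 * fabrics Connect that makes it usable is sent later by
 * nvme_loop_connect_io_queues().
 */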
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

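/*
 * Admin queue bring-up, in dependency order: target SQ, admin tagset and
 * request queue, fabrics Connect, then standard controller init (read
 * CAP, enable, Identify).  Failures unwind in reverse order.
 */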
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

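/*
 * Teardown: quiesce the queues, cancel whatever is still outstanding in
 * the tagsets, then destroy the queues.  I/O queues go first; the admin
 * queue stays up long enough to issue the controller shutdown.
 */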
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

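/*
 * Controller reset: stop everything and tear it down, then rebuild the
 * admin and I/O queues from scratch in the CONNECTING state.  If the
 * rebuild fails the controller is removed instead of retried.
 */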
462static void nvme_loop_reset_ctrl_work(struct work_struct *work)
463{
Christoph Hellwigd86c4d82017-06-15 15:41:08 +0200464 struct nvme_loop_ctrl *ctrl =
465 container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200466 bool changed;
Sagi Grimberg297186d2017-03-13 15:43:44 +0200467 int ret;
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200468
Sagi Grimbergd09f2b42017-07-02 10:56:43 +0300469 nvme_stop_ctrl(&ctrl->ctrl);
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200470 nvme_loop_shutdown_ctrl(ctrl);
471
Johannes Thumshirn8bfc3b42018-05-03 17:00:35 +0200472 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
473 /* state change failure should never happen */
474 WARN_ON_ONCE(1);
475 return;
476 }
477
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200478 ret = nvme_loop_configure_admin_queue(ctrl);
479 if (ret)
480 goto out_disable;
481
Sagi Grimberg945dd5b2017-03-13 13:27:51 +0200482 ret = nvme_loop_init_io_queues(ctrl);
483 if (ret)
484 goto out_destroy_admin;
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200485
Sagi Grimberg297186d2017-03-13 15:43:44 +0200486 ret = nvme_loop_connect_io_queues(ctrl);
487 if (ret)
488 goto out_destroy_io;
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200489
Sagi Grimberg4368c392017-06-29 11:13:43 +0300490 blk_mq_update_nr_hw_queues(&ctrl->tag_set,
491 ctrl->ctrl.queue_count - 1);
492
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200493 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
494 WARN_ON_ONCE(!changed);
495
Sagi Grimbergd09f2b42017-07-02 10:56:43 +0300496 nvme_start_ctrl(&ctrl->ctrl);
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200497
498 return;
499
Sagi Grimberg945dd5b2017-03-13 13:27:51 +0200500out_destroy_io:
501 nvme_loop_destroy_io_queues(ctrl);
502out_destroy_admin:
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200503 nvme_loop_destroy_admin_queue(ctrl);
504out_disable:
505 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
Christoph Hellwig3a85a5d2016-06-21 18:04:21 +0200506 nvme_uninit_ctrl(&ctrl->ctrl);
507 nvme_put_ctrl(&ctrl->ctrl);
508}
509
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

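/*
 * Entry point from the fabrics layer (nvmf_transport_ops.create_ctrl),
 * reached when a "loop" controller is created via /dev/nvme-fabrics:
 * allocate the controller, bring up the admin queue, clamp queue_size
 * to the controller's MAXCMD, create the I/O queues, and put the
 * controller on the module list before marking it live.
 */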
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so that there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.  This scenario would be
	 * possible if we allowed more than one port to be added and a
	 * subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
};

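/*
 * Loop is both a target transport (nvme_loop_ops) and a host fabrics
 * transport (nvme_loop_transport); module init registers on both sides
 * and unwinds the first registration if the second fails.
 */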
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */