/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
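/*
 * The loop transport short-circuits the NVMe host and target stacks
 * inside one kernel, which makes it handy for exercising nvmet without
 * fabric hardware.  A minimal setup sketch (nvmet configfs paths as of
 * this writing; namespace and host-ACL configuration omitted):
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo loop > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		/sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 *	nvme connect -t loop -n testnqn
 */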
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
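/*
 * Worked example, assuming the NVME_AQ_DEPTH of 32 that was current
 * when this was written: blk-mq sees an admin queue depth of 31 and
 * hands out tags 0..30, while tag 31 (== NVME_LOOP_AQ_BLKMQ_DEPTH) is
 * reserved for the single AEN command submitted by
 * nvme_loop_submit_async_event() below.
 */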

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

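/*
 * Queue 0 is the admin queue and has its own tag set; I/O queue i maps
 * to tag_set.tags[i - 1], hence the off-by-one below.
 */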
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

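/*
 * Called by the target (via nvme_loop_ops.queue_response) when a
 * command completes.  There is no completion-queue doorbell on loop:
 * the "interrupt" is simply this direct function call back into the
 * host stack.
 */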
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

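/*
 * A timed-out request here means the target never completed it, so
 * kick off a controller reset and complete the request with a DNR
 * status to prevent retries.
 */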
static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

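/*
 * This is where the loop "transport" earns its name: instead of DMA,
 * the host request's scatterlist is handed directly to the target as
 * iod->req.sg and the command is executed from workqueue context, so
 * data moves between the two stacks without being copied.
 */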
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

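/*
 * AEN submissions bypass blk-mq entirely: the iod lives in the
 * controller itself and the command carries the reserved tag just past
 * the blk-mq admin queue depth (see NVME_LOOP_AQ_BLKMQ_DEPTH above).
 */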
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

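/*
 * ctrl->ctrl.queue_count includes the admin queue as queue 0, so I/O
 * queues run from index 1 and the final count is nr_io_queues + 1.
 */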
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

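/*
 * Admin queue bring-up follows the usual fabrics order: initialize the
 * target-side SQ, allocate the tag set and request queue, issue the
 * fabrics Connect, then read CAP, enable the controller and identify it.
 */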
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

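/*
 * Teardown mirrors bring-up in reverse: quiesce and cancel outstanding
 * I/O, destroy the I/O queues, shut the controller down cleanly if it
 * is still live, then drain and destroy the admin queue.
 */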
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!queue_work(nvme_wq, &ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

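/*
 * Reset tears the controller all the way down and rebuilds it.  On any
 * failure the controller is removed instead; the loop transport has no
 * softer recovery path.
 */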
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
};

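/*
 * I/O queue creation: target-side SQs first, then the blk-mq tag set
 * and the connect_q used to carry the per-queue fabrics Connect
 * commands, and finally the Connect for each I/O queue.
 */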
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

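/*
 * Entry point from the fabrics layer (e.g. for "nvme connect -t loop"):
 * build the host-side controller, bring up the admin and I/O queues,
 * and only then mark the controller live.
 */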
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.  This scenario would
	 * be possible if we allowed more than one port to be added and
	 * a subsystem was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */