/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

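/*
 * nvmet ->queue_response handler: invoked by the target side when a command
 * completes.  AENs are special-cased (no struct request is ever allocated for
 * them); every other completion is matched to the host-side request by
 * command_id and finished through the core NVMe completion path.
 */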
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);

	/* fail with DNR on admin cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

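/*
 * blk-mq ->queue_rq handler: translate the block request into an NVMe
 * command, initialize the matching nvmet request on this loop queue, map the
 * data into the chained scatterlist, and hand execution over to a work item
 * so the target executes the command in process context.
 */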
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_STS_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

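/*
 * Submit an Asynchronous Event Request directly to the target.  The command
 * uses a command_id at/above the blk-mq tag space (NVME_LOOP_AQ_BLKMQ_DEPTH),
 * so no struct request is allocated; see the AEN special case in
 * nvme_loop_queue_response().
 */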
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

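/*
 * Set up the admin queue: allocate the admin tag set and request queue,
 * create the target-side admin SQ, issue the fabrics Connect, read CAP,
 * enable the controller, and run controller identification.  Torn down again
 * by nvme_loop_destroy_admin_queue().
 */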
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

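/*
 * Tear down a controller: stop keep-alive, quiesce and cancel any outstanding
 * I/O and admin requests, destroy the target-side queues, and shut the
 * controller down if it is still live.
 */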
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!queue_work(nvme_wq, &ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

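/*
 * Controller reset work: shut the controller down, then rebuild the admin and
 * I/O queues and reconnect them.  On success the controller transitions back
 * to LIVE and scanning/AEN processing is restarted; on failure the controller
 * is removed.
 */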
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

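/*
 * Fabrics ->create_ctrl for the loop transport: allocate the controller,
 * set up the admin queue, clamp queue_size to the controller's MAXCMD,
 * create the I/O queues if any were requested, and register the controller
 * on the global nvme_loop_ctrl_list.
 */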
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so that there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.
	 * This scenario would be possible if we allowed more than one
	 * port to be added and a subsystem was assigned to a port other
	 * than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */