/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

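/*
 * blk-mq ->complete handler: undo nvme_setup_cmd(), free the chained
 * scatterlist allocated in nvme_loop_queue_rq(), and hand the request
 * back to the core NVMe completion path.
 */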
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

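/*
 * Queue 0 is the admin queue, so its tags live in the admin tag set;
 * I/O queue index N maps to tag_set.tags[N - 1].
 */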
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

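/*
 * ->queue_response handler of the loop transport: the target side hands us
 * a completion entry, which is either routed to the AEN handler or matched
 * back to the originating blk-mq request by its command id.
 */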
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;
		struct nvme_loop_iod *iod;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		iod = blk_mq_rq_to_pdu(rq);
		iod->nvme_req.result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
}

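/*
 * Target-side command execution runs from a work item so that
 * nvme_loop_queue_rq() can return to the block layer without executing
 * the command in the submitter's context.
 */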
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

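/*
 * Host-side ->queue_rq: translate the block request into an NVMe command,
 * initialize the corresponding nvmet request against the loop port, map the
 * data into a chained scatterlist shared directly with the target, and
 * defer execution to nvme_loop_execute_work().
 */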
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_MQ_RQ_QUEUE_OK;
}

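/*
 * AEN commands never get a struct request; they are submitted with a
 * command id of NVME_LOOP_AQ_BLKMQ_DEPTH or above so that
 * nvme_loop_queue_response() can tell them apart from regular admin
 * commands.
 */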
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

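/*
 * Negotiate the number of I/O queues with the controller (capped at the
 * number of online CPUs) and set up one nvmet submission queue per I/O
 * queue.  queue_count includes the admin queue, so index 0 is skipped.
 */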
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

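/*
 * Admin queue bring-up: allocate the admin tag set and request queue, send
 * the Fabrics Connect command, read CAP, enable the controller and identify
 * it.  Errors unwind in reverse order of setup.
 */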
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

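/*
 * Teardown order matters here: stop the keep-alive work first, then quiesce
 * the I/O queues and cancel anything still outstanding, shut the controller
 * down if it is live, and finally tear down the admin queue.
 */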
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

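/*
 * Controller reset: shut everything down and rebuild the admin and I/O
 * queues from scratch.  If any step fails the controller is removed rather
 * than left half-initialized.
 */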
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

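/*
 * Host-side I/O queue setup: create the target submission queues, allocate
 * the I/O tag set and connect_q, then issue a Fabrics Connect on each I/O
 * queue.
 */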
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

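/*
 * ->create_ctrl entry point for the loop transport: allocate and register
 * the controller, bring up the admin queue, clamp queue_size to the
 * controller's MAXCMD, and optionally create the I/O queues before marking
 * the controller live.
 */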
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so
	 * there are no connection rejections when a
	 * subsystem is assigned to a port for which
	 * loop doesn't have a pointer.
	 * This scenario would be possible if we allowed
	 * more than one port to be added and a subsystem
	 * was assigned to a port other than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */