/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH		256

#define NVME_LOOP_MAX_SEGMENTS		256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS	1
#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	rsp;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	spinlock_t		lock;
	struct nvme_loop_queue	*queues;
	u32			queue_count;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	u64			cap;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct work_struct	delete_work;
	struct work_struct	reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int error = 0;

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);

	if (unlikely(req->errors)) {
		if (nvme_req_needs_retry(req, req->errors)) {
			nvme_requeue_req(req);
			return;
		}

		if (blk_rq_is_passthrough(req))
			error = req->errors;
		else
			error = nvme_error_status(req->errors);
	}

	blk_mq_end_request(req, error);
}

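/*
 * Completion callback invoked by the nvmet core once a loopback request has
 * been executed.  Regular commands are completed through blk-mq; AEN
 * completions are recognised by their reserved command id and handed
 * straight to the host core, since no struct request was ever allocated
 * for them.
 */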
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_iod *iod =
		container_of(req, struct nvme_loop_iod, req);
	struct nvme_completion *cqe = &iod->rsp;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq = blk_mq_rq_from_pdu(iod);

		iod->nvme_req.result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	schedule_work(&iod->queue->ctrl->reset_work);

	/* fail with DNR on admin cmd timeout */
	rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

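/*
 * blk-mq ->queue_rq handler: translate the block layer request into an NVMe
 * command, map its data into a chained scatterlist, initialise the
 * corresponding nvmet request and hand it off to a work item so the target
 * code executes it in process context.
 */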
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	int ret;

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = nvmet_loop_port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops)) {
		nvme_cleanup_cmd(req);
		blk_mq_start_request(req);
		nvme_loop_queue_response(&iod->req);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	if (blk_rq_bytes(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		ret = sg_alloc_table_chained(&iod->sg_table,
			blk_rq_nr_phys_segments(req),
			iod->sg_table.sgl);
		if (ret)
			return BLK_MQ_RQ_QUEUE_BUSY;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
	}

	blk_mq_start_request(req);

	schedule_work(&iod->work);
	return BLK_MQ_RQ_QUEUE_OK;
}

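/*
 * Submit the Asynchronous Event Request command.  It uses the iod embedded
 * in the controller and the reserved command id NVME_LOOP_AQ_BLKMQ_DEPTH
 * instead of a blk-mq request, matching the special casing in
 * nvme_loop_queue_response().
 */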
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
				unsigned int hctx_idx, unsigned int rq_idx,
				unsigned int numa_node)
{
	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
	.timeout	= nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_admin_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
	.timeout	= nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
	}

	return 0;
}

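/*
 * Bring up the admin queue: initialise the target-side submission queue,
 * allocate the admin tag set and request queue, run the fabrics Connect,
 * read CAP, enable the controller and identify it.
 */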
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	nvme_start_keep_alive(&ctrl->ctrl);

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

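/*
 * Tear the controller down: stop keep-alive, quiesce and cancel outstanding
 * I/O and admin requests, shut the controller down if it is live, and
 * destroy the target-side queues.
 */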
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
				struct nvme_loop_ctrl, delete_work);

	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return -EBUSY;

	if (!schedule_work(&ctrl->delete_work))
		return -EBUSY;

	return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
	int ret;

	ret = __nvme_loop_del_ctrl(ctrl);
	if (ret)
		return ret;

	flush_work(&ctrl->delete_work);

	return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			__nvme_loop_del_ctrl(ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

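/*
 * Reset work: shut the controller down completely, then rebuild the admin
 * queue and the I/O queues.  If any step fails the controller is removed.
 */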
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl = container_of(work,
					struct nvme_loop_ctrl, reset_work);
	bool changed;
	int ret;

	nvme_loop_shutdown_ctrl(ctrl);

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_queue_scan(&ctrl->ctrl);
	nvme_queue_async_events(&ctrl->ctrl);

	nvme_start_queues(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!schedule_work(&ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.is_fabrics		= true,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_loop_reset_ctrl,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_del_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
};

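/*
 * Create the I/O queues: initialise the target-side submission queues,
 * allocate the I/O tag set and connect queue, then issue the fabrics
 * Connect on each queue.
 */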
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

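/*
 * "create_ctrl" entry point invoked by the fabrics layer when a new loop
 * controller is requested: allocate the controller, set up the admin queue
 * and, if requested, the I/O queues, then mark the controller live and kick
 * off namespace scanning.
 */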
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	spin_lock_init(&ctrl->lock);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	kref_get(&ctrl->ctrl.kref);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (opts->nr_io_queues) {
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	/*
	 * XXX: disallow adding more than one port so that there are no
	 * connection rejections when a subsystem is assigned to a port
	 * for which loop doesn't have a pointer.
	 * This scenario would be possible if we allowed more than one
	 * port to be added and a subsystem was assigned to a port other
	 * than nvmet_loop_port.
	 */

	if (nvmet_loop_port)
		return -EPERM;

	nvmet_loop_port = port;
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	if (port == nvmet_loop_port)
		nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		__nvme_loop_del_ctrl(ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */