/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"


#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	1

struct nvme_rdma_device {
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct kref		ref;
	struct list_head	entry;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	union nvme_result	result;
	__le16			status;
	refcount_t		ref;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	int			nents;
	bool			inline_data;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue	*queue;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_ALLOCATED	= 0,
	NVME_RDMA_Q_LIVE	= 1,
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
};

struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u32			max_fr_pages;

	struct sockaddr_storage	addr;
	struct sockaddr_storage	src_addr;

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe.  With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);

static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline void put_unaligned_le24(u32 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
	*p++ = val >> 16;
}

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < ib_queue_size; i++) {
		if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
			goto out_free_ring;
	}

	return ring;

out_free_ring:
	nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
	return NULL;
}

static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
	wait_for_completion_interruptible_timeout(&queue->cm_done,
			msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
	return queue->cm_error;
}

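/*
 * Create an RC QP on the queue's CM ID.  The send queue is sized for
 * @factor work requests per command (e.g. MR registration, SEND and
 * local invalidate) plus one extra slot reserved for draining; the
 * receive queue holds one WR per command plus the drain slot.
 */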
static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
{
	struct nvme_rdma_device *dev = queue->device;
	struct ib_qp_init_attr init_attr;
	int ret;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.event_handler = nvme_rdma_qp_event;
	/* +1 for drain */
	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
	/* +1 for drain */
	init_attr.cap.max_recv_wr = queue->queue_size + 1;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = queue->ib_cq;
	init_attr.recv_cq = queue->ib_cq;

	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);

	queue->qp = queue->cm_id->qp;
	return ret;
}

static int nvme_rdma_reinit_request(void *data, struct request *rq)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_device *dev = ctrl->device;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	if (WARN_ON_ONCE(!req->mr))
		return 0;

	ib_dereg_mr(req->mr);

	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
			ctrl->max_fr_pages);
	if (IS_ERR(req->mr)) {
		ret = PTR_ERR(req->mr);
		req->mr = NULL;
		goto out;
	}

	req->mr->need_inval = false;

out:
	return ret;
}

static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
	struct nvme_rdma_device *dev = queue->device;

	if (req->mr)
		ib_dereg_mr(req->mr);

	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
}

static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int ret;

	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	if (ret)
		return ret;

	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
			ctrl->max_fr_pages);
	if (IS_ERR(req->mr)) {
		ret = PTR_ERR(req->mr);
		goto out_free_qe;
	}

	req->queue = queue;

	return 0;

out_free_qe:
	nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	return -ENOMEM;
}

static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static void nvme_rdma_free_dev(struct kref *ref)
{
	struct nvme_rdma_device *ndev =
		container_of(ref, struct nvme_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	ib_dealloc_pd(ndev->pd);
	kfree(ndev);
}

static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
{
	kref_put(&dev->ref, nvme_rdma_free_dev);
}

static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
{
	return kref_get_unless_zero(&dev->ref);
}

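/*
 * Look up (or allocate) the per-ib_device context for the device backing
 * @cm_id.  Contexts are cached on device_list, keyed by node GUID, and
 * refcounted so the PD stays valid for as long as any queue uses it.
 */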
static struct nvme_rdma_device *
nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvme_rdma_device *ndev;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev->node_guid == cm_id->device->node_guid &&
		    nvme_rdma_dev_get(ndev))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->dev = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->dev,
		register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (!(ndev->dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		dev_err(&ndev->dev->dev,
			"Memory registrations not supported.\n");
		goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;

	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->ib_cq);

	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);

	nvme_rdma_dev_put(dev);
}

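/*
 * Allocate the IB resources for a queue: completion queue, queue pair and
 * the response ring that the target's completions land in.  The CQ is
 * sized to cover all send-side work requests plus one receive per command.
 */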
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
	struct ib_device *ibdev;
	const int send_wr_factor = 3;			/* MR, SEND, INV */
	const int cq_factor = send_wr_factor + 1;	/* + RECV */
	int comp_vector, idx = nvme_rdma_queue_idx(queue);
	int ret;

	queue->device = nvme_rdma_find_get_device(queue->cm_id);
	if (!queue->device) {
		dev_err(queue->cm_id->device->dev.parent,
			"no client data found!\n");
		return -ECONNREFUSED;
	}
	ibdev = queue->device->dev;

	/*
	 * Spread I/O queue completion vectors according to their queue index.
	 * Admin queues can always go on completion vector 0.
	 */
	comp_vector = idx == 0 ? idx : idx - 1;

	/* +1 for ib_stop_cq */
	queue->ib_cq = ib_alloc_cq(ibdev, queue,
				cq_factor * queue->queue_size + 1,
				comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(queue->ib_cq)) {
		ret = PTR_ERR(queue->ib_cq);
		goto out_put_dev;
	}

	ret = nvme_rdma_create_qp(queue, send_wr_factor);
	if (ret)
		goto out_destroy_ib_cq;

	queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
	if (!queue->rsp_ring) {
		ret = -ENOMEM;
		goto out_destroy_qp;
	}

	return 0;

out_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
	ib_free_cq(queue->ib_cq);
out_put_dev:
	nvme_rdma_dev_put(queue->device);
	return ret;
}

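/*
 * Allocate a queue and establish its RDMA connection state: create the CM
 * ID, resolve the (optional) source and the destination address, and wait
 * for the CM handler to report the result.  The IB resources themselves
 * are created from the CM event handler once the address is resolved.
 */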
static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
		int idx, size_t queue_size)
{
	struct nvme_rdma_queue *queue;
	struct sockaddr *src_addr = NULL;
	int ret;

	queue = &ctrl->queues[idx];
	queue->ctrl = ctrl;
	init_completion(&queue->cm_done);

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;

	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(queue->cm_id)) {
		dev_info(ctrl->ctrl.device,
			"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
		return PTR_ERR(queue->cm_id);
	}

	if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
		src_addr = (struct sockaddr *)&ctrl->src_addr;

	queue->cm_error = -ETIMEDOUT;
	ret = rdma_resolve_addr(queue->cm_id, src_addr,
			(struct sockaddr *)&ctrl->addr,
			NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma_resolve_addr failed (%d).\n", ret);
		goto out_destroy_cm_id;
	}

	ret = nvme_rdma_wait_for_cm(queue);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma connection establishment failed (%d)\n", ret);
		goto out_destroy_cm_id;
	}

	set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);

	return 0;

out_destroy_cm_id:
	rdma_destroy_id(queue->cm_id);
	return ret;
}

static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
		return;

	rdma_disconnect(queue->cm_id);
	ib_drain_qp(queue->qp);
}

static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{
	if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
		return;

	if (nvme_rdma_queue_idx(queue) == 0) {
		nvme_rdma_free_qe(queue->device->dev,
			&queue->ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
	}

	nvme_rdma_destroy_queue_ib(queue);
	rdma_destroy_id(queue->cm_id);
}

static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_rdma_free_queue(&ctrl->queues[i]);
}

static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_rdma_stop_queue(&ctrl->queues[i]);
}

static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
	else
		ret = nvmf_connect_admin_queue(&ctrl->ctrl);

	if (!ret)
		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[idx].flags);
	else
		dev_info(ctrl->ctrl.device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	return ret;
}

static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_rdma_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_rdma_stop_queue(&ctrl->queues[i]);
	return ret;
}

static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	struct ib_device *ibdev = ctrl->device->dev;
	unsigned int nr_io_queues;
	int i, ret;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());

	/*
	 * We map queues to the device IRQ vectors for optimal locality,
	 * so we don't need more queues than completion vectors.
	 */
	nr_io_queues = min_t(unsigned int, nr_io_queues,
				ibdev->num_comp_vectors);

	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret)
		return ret;

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (ctrl->ctrl.queue_count < 2)
		return 0;

	dev_info(ctrl->ctrl.device,
		"creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_rdma_alloc_queue(ctrl, i,
				ctrl->ctrl.sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_rdma_free_queue(&ctrl->queues[i]);

	return ret;
}

static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
		struct blk_mq_tag_set *set)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

	blk_mq_free_tag_set(set);
	nvme_rdma_dev_put(ctrl->device);
}

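/*
 * Set up the blk-mq tag set for either the admin or the I/O queues.  Note
 * that cmd_size reserves room for an inline scatterlist (first_sgl) of
 * SG_CHUNK_SIZE entries behind each nvme_rdma_request.
 */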
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_rdma_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = 2; /* connect + keep-alive */
		set->numa_node = NUMA_NO_NODE;
		set->cmd_size = sizeof(struct nvme_rdma_request) +
			SG_CHUNK_SIZE * sizeof(struct scatterlist);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = ADMIN_TIMEOUT;
		set->flags = BLK_MQ_F_NO_SCHED;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_rdma_mq_ops;
		set->queue_depth = nctrl->opts->queue_size;
		set->reserved_tags = 1; /* fabric connect */
		set->numa_node = NUMA_NO_NODE;
		set->flags = BLK_MQ_F_SHOULD_MERGE;
		set->cmd_size = sizeof(struct nvme_rdma_request) +
			SG_CHUNK_SIZE * sizeof(struct scatterlist);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		goto out;

	/*
	 * We need a reference on the device as long as the tag_set is alive,
	 * as the MRs in the request structures need a valid ib_device.
	 */
	ret = nvme_rdma_dev_get(ctrl->device);
	if (!ret) {
		ret = -EINVAL;
		goto out_free_tagset;
	}

	return set;

out_free_tagset:
	blk_mq_free_tag_set(set);
out:
	return ERR_PTR(ret);
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	if (remove) {
		blk_cleanup_queue(ctrl->ctrl.admin_q);
		nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
	}
	nvme_rdma_free_queue(&ctrl->queues[0]);
}

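/*
 * Bring up the admin queue: allocate the RDMA queue, create (or reinit)
 * the admin tag set, connect the queue, then read CAP, enable the
 * controller and identify it.  @new distinguishes initial controller
 * creation from a reconnect/reset of an existing controller.
 */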
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool new)
{
	int error;

	error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (error)
		return error;

	ctrl->device = ctrl->queues[0].device;

	ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
		ctrl->device->dev->attrs.max_fast_reg_page_list_len);

	if (new) {
		ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
		if (IS_ERR(ctrl->ctrl.admin_tagset)) {
			error = PTR_ERR(ctrl->ctrl.admin_tagset);
			goto out_free_queue;
		}

		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.admin_q)) {
			error = PTR_ERR(ctrl->ctrl.admin_q);
			goto out_free_tagset;
		}
	} else {
		error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
		if (error)
			goto out_free_queue;
	}

	error = nvme_rdma_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
			&ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
			&ctrl->async_event_sqe, sizeof(struct nvme_command),
			DMA_TO_DEVICE);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	if (new)
		nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
out_free_queue:
	nvme_rdma_free_queue(&ctrl->queues[0]);
	return error;
}

static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	nvme_rdma_stop_io_queues(ctrl);
	if (remove) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
	}
	nvme_rdma_free_io_queues(ctrl);
}

static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_rdma_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
		if (IS_ERR(ctrl->ctrl.tagset)) {
			ret = PTR_ERR(ctrl->ctrl.tagset);
			goto out_free_io_queues;
		}

		ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
		if (IS_ERR(ctrl->ctrl.connect_q)) {
			ret = PTR_ERR(ctrl->ctrl.connect_q);
			goto out_free_tag_set;
		}
	} else {
		ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
		if (ret)
			goto out_free_io_queues;

		blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);
	}

	ret = nvme_rdma_start_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	if (new)
		nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
out_free_io_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
}

static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
			ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->ctrl.device, "Removing controller...\n");
		nvme_delete_ctrl(&ctrl->ctrl);
	}
}

static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_rdma_ctrl, reconnect_work);
	bool changed;
	int ret;

	++ctrl->ctrl.nr_reconnects;

	ret = nvme_rdma_configure_admin_queue(ctrl, false);
	if (ret)
		goto requeue;

	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_configure_io_queues(ctrl, false);
		if (ret)
			goto destroy_admin;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	if (!changed) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
		return;
	}

	nvme_start_ctrl(&ctrl->ctrl);

	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
			ctrl->ctrl.nr_reconnects);

	ctrl->ctrl.nr_reconnects = 0;

	return;

destroy_admin:
	nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
			ctrl->ctrl.nr_reconnects);
	nvme_rdma_reconnect_or_remove(ctrl);
}

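/*
 * Error recovery: tear down all queues, cancel any in-flight requests so
 * the upper layers can retry them, then attempt to reconnect (or remove
 * the controller once the reconnect budget is exhausted).
 */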
static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
			struct nvme_rdma_ctrl, err_work);

	nvme_stop_keep_alive(&ctrl->ctrl);

	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_rdma_destroy_io_queues(ctrl, false);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	nvme_rdma_destroy_admin_queue(ctrl, false);

	/*
	 * queues are not live anymore, so restart the queues to fail fast
	 * new IO
	 */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_start_queues(&ctrl->ctrl);

	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
		return;

	queue_work(nvme_wq, &ctrl->err_work);
}

static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
		const char *op)
{
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		dev_info(ctrl->ctrl.device,
			     "%s for CQE 0x%p failed with status %s (%d)\n",
			     op, wc->wr_cqe,
			     ib_wc_status_msg(wc->status), wc->status);
	nvme_rdma_error_recovery(ctrl);
}

static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "MEMREG");
}

static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvme_rdma_request *req =
		container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
	struct request *rq = blk_mq_rq_from_pdu(req);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
		return;
	}

	if (refcount_dec_and_test(&req->ref))
		nvme_end_request(rq, req->status, req->result);
}

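/*
 * Post a signaled LOCAL_INV work request for the request's rkey; used when
 * the target did not send its completion with remote invalidation.
 */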
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = req->mr->rkey,
	};

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;

	return ib_post_send(queue->qp, &wr, &bad_wr);
}

static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;

	if (!blk_rq_bytes(rq))
		return;

	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
			req->nents, rq_data_dir(rq) ==
				    WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	nvme_cleanup_cmd(rq);
	sg_free_table_chained(&req->sg_table, true);
}

static int nvme_rdma_set_sg_null(struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = 0;
	put_unaligned_le24(0, sg->length);
	put_unaligned_le32(0, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
	req->sge[1].length = sg_dma_len(req->sg_table.sgl);
	req->sge[1].lkey = queue->device->pd->local_dma_lkey;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;

	req->inline_data = true;
	req->num_sge++;
	return 0;
}

static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
	put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	/*
	 * Align the MR to a 4K page size to match the ctrl page size and
	 * the block virtual boundary.
	 */
	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
	if (unlikely(nr < count)) {
		if (nr < 0)
			return nr;
		return -EINVAL;
	}

	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_memreg_done;
	memset(&req->reg_wr, 0, sizeof(req->reg_wr));
	req->reg_wr.wr.opcode = IB_WR_REG_MR;
	req->reg_wr.wr.wr_cqe = &req->reg_cqe;
	req->reg_wr.wr.num_sge = 0;
	req->reg_wr.mr = req->mr;
	req->reg_wr.key = req->mr->rkey;
	req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_READ |
			     IB_ACCESS_REMOTE_WRITE;

	req->mr->need_inval = true;

	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
			NVME_SGL_FMT_INVALIDATE;

	return 0;
}

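/*
 * Map the request's data for the wire, picking the cheapest SGL
 * representation that applies: a null SGL for zero-length commands,
 * inline data for small I/O-queue writes, a single keyed SGL entry when
 * the unsafe global rkey may be used, and a fast-registration MR
 * otherwise.
 */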
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_command *c)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int count, ret;

	req->num_sge = 1;
	req->inline_data = false;
	req->mr->need_inval = false;
	refcount_set(&req->ref, 2); /* send and recv completions */

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_bytes(rq))
		return nvme_rdma_set_sg_null(c);

	req->sg_table.sgl = req->first_sgl;
	ret = sg_alloc_table_chained(&req->sg_table,
			blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);

	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(count <= 0)) {
		sg_free_table_chained(&req->sg_table, true);
		return -EIO;
	}

	if (count == 1) {
		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
		    blk_rq_payload_bytes(rq) <=
				nvme_rdma_inline_data_size(queue))
			return nvme_rdma_map_sg_inline(queue, req, c);

		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			return nvme_rdma_map_sg_single(queue, req, c);
	}

	return nvme_rdma_map_sg_fr(queue, req, c, count);
}

static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvme_rdma_qe *qe =
		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
	struct nvme_rdma_request *req =
		container_of(qe, struct nvme_rdma_request, sqe);
	struct request *rq = blk_mq_rq_from_pdu(req);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvme_rdma_wr_error(cq, wc, "SEND");
		return;
	}

	if (refcount_dec_and_test(&req->ref))
		nvme_end_request(rq, req->status, req->result);
}

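/*
 * Post the command capsule on the send queue.  If @first is set (the MR
 * registration WR), it is chained in front of the SEND so both are posted
 * with a single doorbell.
 */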
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
		struct ib_send_wr *first)
{
	struct ib_send_wr wr, *bad_wr;
	int ret;

	sge->addr   = qe->dma;
	sge->length = sizeof(struct nvme_command);
	sge->lkey   = queue->device->pd->local_dma_lkey;

	wr.next       = NULL;
	wr.wr_cqe     = &qe->cqe;
	wr.sg_list    = sge;
	wr.num_sge    = num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	if (first)
		first->next = &wr;
	else
		first = &wr;

	ret = ib_post_send(queue->qp, first, &bad_wr);
	if (unlikely(ret)) {
		dev_err(queue->ctrl->ctrl.device,
			     "%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;
	int ret;

	list.addr   = qe->dma;
	list.length = sizeof(struct nvme_completion);
	list.lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &qe->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(queue->qp, &wr, &bad_wr);
	if (unlikely(ret)) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
{
	u32 queue_idx = nvme_rdma_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "ASYNC");
}

static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
	struct nvme_rdma_queue *queue = &ctrl->queues[0];
	struct ib_device *dev = queue->device->dev;
	struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
	struct nvme_command *cmd = sqe->data;
	struct ib_sge sge;
	int ret;

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);

	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_rdma_set_sg_null(cmd);

	sqe->cqe.done = nvme_rdma_async_done;

	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
			DMA_TO_DEVICE);

	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
	WARN_ON_ONCE(ret);
}

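/*
 * Process a completion capsule.  If the target invalidated our rkey
 * remotely (send-with-invalidate), the MR needs no further work;
 * otherwise a local invalidation is queued and completion of the request
 * is deferred until it finishes.  The request is only ended once both the
 * send and receive sides have dropped their reference.
 */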
static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
		struct nvme_completion *cqe, struct ib_wc *wc, int tag)
{
	struct request *rq;
	struct nvme_rdma_request *req;
	int ret = 0;

	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"tag 0x%x on QP %#x not found\n",
			cqe->command_id, queue->qp->qp_num);
		nvme_rdma_error_recovery(queue->ctrl);
		return ret;
	}
	req = blk_mq_rq_to_pdu(rq);

	req->status = cqe->status;
	req->result = cqe->result;

	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
	    wc->ex.invalidate_rkey == req->mr->rkey) {
		req->mr->need_inval = false;
	} else if (req->mr->need_inval) {
		ret = nvme_rdma_inv_rkey(queue, req);
		if (unlikely(ret < 0)) {
			dev_err(queue->ctrl->ctrl.device,
				"Queueing INV WR for rkey %#x failed (%d)\n",
				req->mr->rkey, ret);
			nvme_rdma_error_recovery(queue->ctrl);
		}
		/* the local invalidation completion will end the request */
		return 0;
	}

	if (refcount_dec_and_test(&req->ref)) {
		if (rq->tag == tag)
			ret = 1;
		nvme_end_request(rq, req->status, req->result);
	}

	return ret;
}

static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
{
	struct nvme_rdma_qe *qe =
		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct ib_device *ibdev = queue->device->dev;
	struct nvme_completion *cqe = qe->data;
	const size_t len = sizeof(struct nvme_completion);
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvme_rdma_wr_error(cq, wc, "RECV");
		return 0;
	}

	ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

	nvme_rdma_post_recv(queue, qe);
	return ret;
}

static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	__nvme_rdma_recv_done(cq, wc, -1);
}

static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
{
	int ret, i;

	for (i = 0; i < queue->queue_size; i++) {
		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
		if (ret)
			goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
		struct rdma_cm_event *ev)
{
	struct rdma_cm_id *cm_id = queue->cm_id;
	int status = ev->status;
	const char *rej_msg;
	const struct nvme_rdma_cm_rej *rej_data;
	u8 rej_data_len;

	rej_msg = rdma_reject_msg(cm_id, status);
	rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);

	if (rej_data && rej_data_len >= sizeof(u16)) {
		u16 sts = le16_to_cpu(rej_data->sts);

		dev_err(queue->ctrl->ctrl.device,
		      "Connect rejected: status %d (%s) nvme status %d (%s).\n",
		      status, rej_msg, sts, nvme_rdma_cm_msg(sts));
	} else {
		dev_err(queue->ctrl->ctrl.device,
			"Connect rejected: status %d (%s).\n", status, rej_msg);
	}

	return -ECONNRESET;
}

static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{
	int ret;

	ret = nvme_rdma_create_queue_ib(queue);
	if (ret)
		return ret;

	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"rdma_resolve_route failed (%d).\n",
			queue->cm_error);
		goto out_destroy_queue;
	}

	return 0;

out_destroy_queue:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

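/*
 * Route is resolved: issue the actual RDMA connect, carrying the NVMe/RDMA
 * CM request private data (record format, queue ID and the host's
 * send/receive queue sizes) defined by the NVMe over Fabrics spec.
 */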
static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_req priv = { };
	int ret;

	param.qp_num = queue->qp->qp_num;
	param.flow_control = 1;

	param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
	/* maximum retry count */
	param.retry_count = 7;
	param.rnr_retry_count = 7;
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);

	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
	/*
	 * set the admin queue depth to the minimum size
	 * specified by the Fabrics standard.
	 */
	if (priv.qid == 0) {
		priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
		priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
	} else {
		/*
		 * The current interpretation of the fabrics spec is
		 * that, at minimum, hrqsize is sqsize + 1, i.e. the
		 * 1's based representation of sqsize.
		 */
		priv.hrqsize = cpu_to_le16(queue->queue_size);
		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
	}

	ret = rdma_connect(queue->cm_id, &param);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"rdma_connect failed (%d).\n", ret);
		goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

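/*
 * Central dispatcher for RDMA CM events on a queue.  It runs from CM
 * callback context: connection setup events report their result to
 * the waiter through cm_error/cm_done, while failures on a live
 * connection escalate to controller-wide error recovery.
 */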
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *ev)
{
	struct nvme_rdma_queue *queue = cm_id->context;
	int cm_error = 0;

	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
		rdma_event_msg(ev->event), ev->event,
		ev->status, cm_id);

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_error = nvme_rdma_addr_resolved(queue);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_error = nvme_rdma_route_resolved(queue);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		queue->cm_error = nvme_rdma_conn_established(queue);
		/* complete cm_done regardless of success/failure */
		complete(&queue->cm_done);
		return 0;
	case RDMA_CM_EVENT_REJECTED:
		nvme_rdma_destroy_queue_ib(queue);
		cm_error = nvme_rdma_conn_rejected(queue, ev);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		nvme_rdma_destroy_queue_ib(queue);
		/* fall through */
	case RDMA_CM_EVENT_ADDR_ERROR:
		dev_dbg(queue->ctrl->ctrl.device,
			"CM error event %d\n", ev->event);
		cm_error = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		dev_dbg(queue->ctrl->ctrl.device,
			"disconnect received - connection closed\n");
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* device removal is handled via the ib_client API */
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"Unexpected RDMA CM event (%d)\n", ev->event);
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	}

	if (cm_error) {
		queue->cm_error = cm_error;
		complete(&queue->cm_done);
	}

	return 0;
}

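/*
 * A request exceeded its block layer timeout: kick off controller
 * error recovery, and complete the request with ABORT + DNR so the
 * core does not retry it.
 */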
static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	dev_warn(req->queue->ctrl->ctrl.device,
		 "I/O %d QID %d timeout, reset controller\n",
		 rq->tag, nvme_rdma_queue_idx(req->queue));

	/* queue error recovery */
	nvme_rdma_error_recovery(req->queue->ctrl);

	/* fail with DNR on cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

/*
 * We cannot accept any other command until the Connect command has completed.
 */
static inline blk_status_t
nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
{
	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
	return BLK_STS_OK;
}

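/*
 * Fast path .queue_rq handler: build the NVMe command in the
 * pre-mapped SQE buffer, DMA-map the request data, and post the whole
 * thing as an RDMA send, chaining the memory registration WR in front
 * when the data needed an MR.  -ENOMEM/-EAGAIN are reported as
 * BLK_STS_RESOURCE so the block layer requeues the request.
 */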
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = sqe->data;
	struct ib_device *dev;
	blk_status_t ret;
	int err;

	WARN_ON_ONCE(rq->tag < 0);

	ret = nvme_rdma_is_ready(queue, rq);
	if (unlikely(ret))
		return ret;

	dev = queue->device->dev;
	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	ret = nvme_setup_cmd(ns, rq, c);
	if (ret)
		return ret;

	blk_mq_start_request(rq);

	err = nvme_rdma_map_data(queue, rq, c);
	if (unlikely(err < 0)) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", err);
		nvme_cleanup_cmd(rq);
		goto err;
	}

	sqe->cqe.done = nvme_rdma_send_done;

	ib_dma_sync_single_for_device(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
			req->mr->need_inval ? &req->reg_wr.wr : NULL);
	if (unlikely(err)) {
		nvme_rdma_unmap_data(queue, rq);
		goto err;
	}

	return BLK_STS_OK;
err:
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}

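/*
 * .poll handler: reap completions directly from the CQ.  Receive
 * completions are matched against the polled tag; anything else is
 * simply dispatched to its done callback.
 */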
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct ib_cq *cq = queue->ib_cq;
	struct ib_wc wc;
	int found = 0;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		struct ib_cqe *cqe = wc.wr_cqe;

		if (cqe) {
			if (cqe->done == nvme_rdma_recv_done)
				found |= __nvme_rdma_recv_done(cq, &wc, tag);
			else
				cqe->done(cq, &wc);
		}
	}

	return found;
}

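/* Unmap the request's data before handing it back to the core. */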
static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	nvme_rdma_unmap_data(req->queue, rq);
	nvme_complete_rq(rq);
}

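/*
 * Spread the hardware contexts over the device's completion vectors,
 * based on the affinity hints the RDMA core exposes for each vector
 * (with a fallback to the default blk-mq mapping when no hints are
 * available).
 */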
static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;

	return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
}

static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_hctx,
	.poll		= nvme_rdma_poll,
	.timeout	= nvme_rdma_timeout,
	.map_queues	= nvme_rdma_map_queues,
};

static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_admin_hctx,
	.timeout	= nvme_rdma_timeout,
};

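/*
 * Common teardown for reset and delete: stop the recovery/reconnect
 * work first, then quiesce the queues and cancel anything in flight
 * before destroying them.  'shutdown' selects a clean NVMe shutdown
 * (delete) versus a plain controller disable (reset).
 */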
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_rdma_destroy_io_queues(ctrl, shutdown);
	}

	if (shutdown)
		nvme_shutdown_ctrl(&ctrl->ctrl);
	else
		nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_rdma_destroy_admin_queue(ctrl, shutdown);
}

static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
}

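/*
 * Controller reset: tear the association down without a full NVMe
 * shutdown, rebuild the admin and I/O queues, and only then move the
 * controller back to LIVE.  If the rebuild fails the controller is
 * removed altogether.
 */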
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl =
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
	int ret;
	bool changed;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, false);

	ret = nvme_rdma_configure_admin_queue(ctrl, false);
	if (ret)
		goto out_fail;

	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_configure_io_queues(ctrl, false);
		if (ret)
			goto out_fail;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	if (!changed) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
		return;
	}

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_fail:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_remove_namespaces(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, true);
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
	.name			= "rdma",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_rdma_free_ctrl,
	.submit_async_event	= nvme_rdma_submit_async_event,
	.delete_ctrl		= nvme_rdma_delete_ctrl,
	.get_address		= nvmf_get_address,
	.reinit_request		= nvme_rdma_reinit_request,
};

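/*
 * Returns true if 'opts' describes the same association as an
 * existing controller.  An unspecified trsvcid is compared as the
 * default NVMe/RDMA port, so "no port given" and "default port given"
 * match each other.
 */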
static inline bool
__nvme_rdma_options_match(struct nvme_rdma_ctrl *ctrl,
	struct nvmf_ctrl_options *opts)
{
	char *stdport = __stringify(NVME_RDMA_IP_PORT);

	if (!nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->ctrl.opts->traddr))
		return false;

	if (opts->mask & NVMF_OPT_TRSVCID &&
	    ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
		if (strcmp(opts->trsvcid, ctrl->ctrl.opts->trsvcid))
			return false;
	} else if (opts->mask & NVMF_OPT_TRSVCID) {
		if (strcmp(opts->trsvcid, stdport))
			return false;
	} else if (ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
		if (strcmp(stdport, ctrl->ctrl.opts->trsvcid))
			return false;
	}
	/* else, it's a match as both have stdport. Fall to next checks */

	/*
	 * Checking the local address is rough.  In most cases, one is
	 * not specified and the host port is selected by the stack.
	 *
	 * Assume no match if:
	 *  - a local address is specified and the addresses differ, or
	 *  - a local address is specified on only one side
	 *    (admin using a specific host_traddr when it matters).
	 */
	if (opts->mask & NVMF_OPT_HOST_TRADDR &&
	    ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
		if (strcmp(opts->host_traddr, ctrl->ctrl.opts->host_traddr))
			return false;
	} else if (opts->mask & NVMF_OPT_HOST_TRADDR ||
		   ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
		return false;
	/*
	 * If neither controller had a host port specified, assume it's
	 * a match as everything else matched.
	 */

	return true;
}

/*
 * Fails a connection request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
 *
 * If a local address is not specified in the request, it will match an
 * existing controller with all the other parameters the same and no
 * local port address specified as well.
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		found = __nvme_rdma_options_match(ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return found;
}

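/*
 * Create and start a controller for a fabrics connect request.  This
 * path is normally reached from userspace through nvme-cli, along the
 * lines of the following (illustrative invocation only; address and
 * NQN are placeholders):
 *
 *	nvme connect -t rdma -a 192.168.1.10 -s 4420 \
 *		-n nqn.2016-06.io.example:subsys1
 */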
static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	int ret;
	bool changed;
	char *port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	if (opts->mask & NVMF_OPT_TRSVCID)
		port = opts->trsvcid;
	else
		port = __stringify(NVME_RDMA_IP_PORT);

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, port, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n", opts->traddr, port);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
				opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_free_ctrl;

	INIT_DELAYED_WORK(&ctrl->reconnect_work,
			nvme_rdma_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

	ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_rdma_configure_admin_queue(ctrl, true);
	if (ret)
		goto out_kfree_queues;

	/* sanity check icdoff */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	/* sanity check keyed sgls */
	if (!(ctrl->ctrl.sgls & (1 << 20))) {
		dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	if (opts->nr_io_queues) {
		ret = nvme_rdma_configure_io_queues(ctrl, true);
		if (ret)
			goto out_remove_admin_queue;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	nvme_get_ctrl(&ctrl->ctrl);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_rdma_destroy_admin_queue(ctrl, true);
out_kfree_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

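/*
 * Only the transport address is mandatory on the command line; the
 * service id (port) defaults to NVME_RDMA_IP_PORT when omitted, as
 * handled in nvme_rdma_create_ctrl() above.
 */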
static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_rdma_create_ctrl,
};

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		dev_info(ctrl->ctrl.device,
			"Removing ctrl: NQN \"%s\", addr %pISp\n",
			ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	flush_workqueue(nvme_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name	= "nvme_rdma",
	.remove	= nvme_rdma_remove_one
};

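/*
 * Register with the IB core before registering the fabrics transport,
 * so the device removal callback is already in place by the time a
 * controller can first be created through the transport.
 */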
static int __init nvme_rdma_init_module(void)
{
	int ret;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");