/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

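/*
 * Per-RECV context: sge[0] always points at the NVMe command capsule,
 * sge[1] (non-admin queues only) points at the pre-allocated inline data
 * buffer, so a single RECV can carry the SQE plus its inline data.
 */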
struct nvmet_rdma_cmd {
	struct ib_sge		sge[2];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg;
	struct page		*inline_page;
	struct nvme_command	*nvme_cmd;
	struct nvmet_rdma_queue	*queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

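/*
 * Per-command response context: holds the SEND work request that carries
 * the NVMe CQE, the rdma_rw context used for RDMA READ/WRITE data transfer,
 * and the nvmet_req handed to the core.  Responses are pooled per queue on
 * free_rsps.
 */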
struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

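/*
 * A command needs a host-to-target transfer (RDMA READ) only if it is a
 * write that did not carry its data inline; data flows back to the host
 * (RDMA WRITE) only for a non-write command that completed successfully.
 */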
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl || !nents)
		return;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));
	kfree(sgl);
}

static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
out:
	return NVME_SC_INTERNAL;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

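/*
 * Note: twice the RECV queue depth worth of responses is allocated,
 * presumably because a response stays in use from RECV completion until
 * its SEND completes, while the RECV slot itself is reposted earlier.
 */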
static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}


static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case
		 * of an admin connect error, so just disconnect and
		 * clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}

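/*
 * Completion path for a request: optionally chain the RDMA WRITE work
 * requests (data-out) in front of the SEND carrying the NVMe CQE, repost
 * the RECV buffer, and post the whole chain with one ib_post_send() call.
 */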
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	return 0;
}

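/*
 * A keyed SGL describes a buffer in host memory (address, length, rkey).
 * rdma_rw_ctx_init() maps a locally allocated scatterlist against it and
 * builds the RDMA READ or WRITE work requests needed for the transfer; the
 * rkey is remembered if the host asked for remote invalidation.
 */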
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;
	u16 status;

	/* no data command? */
	if (!len)
		return 0;

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);
	if (status)
		return status;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

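/*
 * Executing a command consumes 1 + n_rdma send queue WRs (the CQE SEND plus
 * any RDMA READ/WRITE work requests).  If the send queue does not have room,
 * the caller parks the request on rsp_wr_wait_list and it is retried when
 * other requests complete and free up WR slots.
 */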
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

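/*
 * RECV completion: commands that arrive while the queue is still
 * CONNECTING (i.e. before the CM ESTABLISHED event) are parked on
 * rsp_wait_list and replayed once the queue goes live.
 */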
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

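/*
 * Connect request path: validate the NVMe/RDMA CM private data, size the
 * queue from hsqsize/hrqsize, allocate response and command contexts, and
 * create the CQ/QP.  On failure the host is answered with an NVMe/RDMA CM
 * reject status rather than a plain errno.
 */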
static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret)
		goto release_queue;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

release_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id of either a queue or a listener (for a listener
 *		its context is the nvmet port)
 * @queue:	nvmet rdma queue (cm_id qp_context), NULL for a listener cm_id
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug so we should take care of destroying our RDMA resources.
 * This event will be generated for each allocated cm_id.
 *
 * Note that this event can be generated on a normal queue cm_id
 * and/or a device bound listener cm_id (in which case
 * queue will be NULL).
 *
 * We claim ownership of destroying the cm_id. For queues we move
 * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL, and for the port
 * we clear its priv pointer to prevent a double cm_id destruction,
 * destroying the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	if (!queue) {
		struct nvmet_port *port = cm_id->context;

		/*
		 * This is a listener cm_id. Make sure that
		 * future remove_port won't invoke a double
		 * cm_id destroy. use atomic xchg to make sure
		 * we don't compete with remove_port.
		 */
		if (xchg(&port->priv, NULL) != cm_id)
			return 0;
	} else {
		/*
		 * This is a queue cm_id. Make sure that
		 * release queue will not destroy the cm_id
		 * and schedule all ctrl queues removal (only
		 * if the queue is not disconnecting already).
		 */
		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
			queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_queue_disconnect(queue);
		flush_scheduled_work();
	}

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID.  What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_in addr_in;
	u16 port_in;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);
	if (ret)
		return ret;

	addr_in.sin_family = AF_INET;
	addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
	addr_in.sin_port = htons(port_in);

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
	if (ret) {
		pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(port->disc_addr.portid), &addr_in);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
};

static int __init nvmet_rdma_init(void)
{
	return nvmet_register_transport(&nvmet_rdma_ops);
}

static void __exit nvmet_rdma_exit(void)
{
	struct nvmet_rdma_queue *queue;

	nvmet_unregister_transport(&nvmet_rdma_ops);

	flush_scheduled_work();

	mutex_lock(&nvmet_rdma_queue_mutex);
	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
			struct nvmet_rdma_queue, queue_list))) {
		list_del_init(&queue->queue_list);

		mutex_unlock(&nvmet_rdma_queue_mutex);
		__nvmet_rdma_queue_disconnect(queue);
		mutex_lock(&nvmet_rdma_queue_mutex);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */