/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
			   struct pvrdma_cq **recv_cq)
{
	*send_cq = to_vcq(qp->ibqp.send_cq);
	*recv_cq = to_vcq(qp->ibqp.recv_cq);
}

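/*
 * Lock the send and recv CQs of a QP together. When they are distinct CQs,
 * the locks are always taken in ascending cq_handle order so that two
 * concurrent callers cannot deadlock against each other.
 */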
static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			    unsigned long *scq_flags,
			    unsigned long *rcq_flags)
	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
{
	if (scq == rcq) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		__acquire(rcq->cq_lock);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
					 SINGLE_DEPTH_NESTING);
	}
}

static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			      unsigned long *scq_flags,
			      unsigned long *rcq_flags)
	__releases(scq->cq_lock) __releases(rcq->cq_lock)
{
	if (scq == rcq) {
		__release(rcq->cq_lock);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else {
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
	}
}

static void pvrdma_reset_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_cq *scq, *rcq;
	unsigned long scq_flags, rcq_flags;

	/* Clean up cqes */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	/*
	 * Reset queuepair. The checks are because usermode queuepairs won't
	 * have kernel ringstates.
	 */
	if (qp->rq.ring) {
		atomic_set(&qp->rq.ring->cons_head, 0);
		atomic_set(&qp->rq.ring->prod_tail, 0);
	}
	if (qp->sq.ring) {
		atomic_set(&qp->sq.ring->cons_head, 0);
		atomic_set(&qp->sq.ring->prod_tail, 0);
	}
}

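/*
 * Validate the requested receive queue size against the device caps, round
 * the WR and SGE counts up to powers of two, and write the granted values
 * back into req_cap for the caller.
 */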
static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
			      struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
		return -EINVAL;
	}

	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));

	/* Write back */
	req_cap->max_recv_wr = qp->rq.wqe_cnt;
	req_cap->max_recv_sge = qp->rq.max_sg;

	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->rq.max_sg);
	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
		return -EINVAL;
	}

	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));

	/* Write back */
	req_cap->max_send_wr = qp->sq.wqe_cnt;
	req_cap->max_send_sge = qp->sq.max_sg;

	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->sq.max_sg);
	/* Note: one extra page for the header. */
	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

/**
 * pvrdma_create_qp - create queue pair
 * @pd: protection domain
 * @init_attr: queue pair attributes
 * @udata: user data
 *
 * @return: the ib_qp pointer on success, otherwise returns an errno.
 */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_qp *qp = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
	struct pvrdma_create_qp ucmd;
	unsigned long flags;
	int ret;
	bool is_srq = !!init_attr->srq;

	if (init_attr->create_flags) {
		dev_warn(&dev->pdev->dev,
			 "invalid create queuepair flags %#x\n",
			 init_attr->create_flags);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_RC &&
	    init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_GSI) {
		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (is_srq && !dev->dsr->caps.max_srq) {
		dev_warn(&dev->pdev->dev,
			 "SRQs not supported by device\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
		return ERR_PTR(-ENOMEM);

	switch (init_attr->qp_type) {
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > pd->device->phys_port_cnt ||
		    udata) {
			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
			ret = -EINVAL;
			goto err_qp;
		}
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UD:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			ret = -ENOMEM;
			goto err_qp;
		}

		spin_lock_init(&qp->sq.lock);
		spin_lock_init(&qp->rq.lock);
		mutex_init(&qp->mutex);
		atomic_set(&qp->refcnt, 1);
		init_waitqueue_head(&qp->wait);

		qp->state = IB_QPS_RESET;

		if (pd->uobject && udata) {
			dev_dbg(&dev->pdev->dev,
				"create queuepair from user space\n");

			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
				ret = -EFAULT;
				goto err_qp;
			}

			if (!is_srq) {
				/* set qp->sq.wqe_cnt, shift, buf_size.. */
				qp->rumem = ib_umem_get(pd->uobject->context,
							ucmd.rbuf_addr,
							ucmd.rbuf_size, 0, 0);
				if (IS_ERR(qp->rumem)) {
					ret = PTR_ERR(qp->rumem);
					goto err_qp;
				}
				qp->srq = NULL;
			} else {
				qp->rumem = NULL;
				qp->srq = to_vsrq(init_attr->srq);
			}

			qp->sumem = ib_umem_get(pd->uobject->context,
						ucmd.sbuf_addr,
						ucmd.sbuf_size, 0, 0);
			if (IS_ERR(qp->sumem)) {
				if (!is_srq)
					ib_umem_release(qp->rumem);
				ret = PTR_ERR(qp->sumem);
				goto err_qp;
			}

			qp->npages_send = ib_umem_page_count(qp->sumem);
			if (!is_srq)
				qp->npages_recv = ib_umem_page_count(qp->rumem);
			else
				qp->npages_recv = 0;
			qp->npages = qp->npages_send + qp->npages_recv;
		} else {
			qp->is_kernel = true;

			ret = pvrdma_set_sq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			ret = pvrdma_set_rq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			qp->npages = qp->npages_send + qp->npages_recv;

			/* Skip header page. */
			qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

			/* Recv queue pages are after send pages. */
			qp->rq.offset = qp->npages_send * PAGE_SIZE;
		}

		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
			dev_warn(&dev->pdev->dev,
				 "overflow pages in queuepair\n");
			ret = -EINVAL;
			goto err_umem;
		}

		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
					   qp->is_kernel);
		if (ret) {
			dev_warn(&dev->pdev->dev,
				 "could not allocate page directory\n");
			goto err_umem;
		}

		if (!qp->is_kernel) {
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
			if (!is_srq)
				pvrdma_page_dir_insert_umem(&qp->pdir,
							    qp->rumem,
							    qp->npages_send);
		} else {
			/* Ring state is always the first page. */
			qp->sq.ring = qp->pdir.pages[0];
			qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
		}
		break;
	default:
		ret = -EINVAL;
		goto err_qp;
	}

	/* Not supported */
	init_attr->cap.max_inline_data = 0;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
	if (is_srq)
		cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
	else
		cmd->srq_handle = 0;
	cmd->max_send_wr = init_attr->cap.max_send_wr;
	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
	cmd->max_send_sge = init_attr->cap.max_send_sge;
	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
	cmd->max_inline_data = init_attr->cap.max_inline_data;
	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
	cmd->is_srq = is_srq;
	cmd->lkey = 0;
	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
	cmd->total_chunks = qp->npages;
	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
	cmd->pdir_dma = qp->pdir.dir_dma;

	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
		cmd->max_recv_sge);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create queuepair, error: %d\n", ret);
		goto err_pdir;
	}

	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
	qp->qp_handle = resp->qpn;
	qp->port = init_attr->port_num;
	qp->ibqp.qp_num = resp->qpn;
	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	return &qp->ibqp;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
	if (pd->uobject && udata) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}
err_qp:
	kfree(qp);
	atomic_dec(&dev->num_qps);

	return ERR_PTR(ret);
}

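/*
 * Release the host-side state of a QP: flush pending CQEs, unhook the QP
 * from the device's QP table, wait for the last reference to drop and then
 * free the page directory and the QP itself.
 */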
static void pvrdma_free_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
	struct pvrdma_cq *scq;
	struct pvrdma_cq *rcq;
	unsigned long flags, scq_flags, rcq_flags;

	/* In case cq is polling */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle] = NULL;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	atomic_dec(&qp->refcnt);
	wait_event(qp->wait, !atomic_read(&qp->refcnt));

	pvrdma_page_dir_cleanup(dev, &qp->pdir);

	kfree(qp);

	atomic_dec(&dev->num_qps);
}

/**
 * pvrdma_destroy_qp - destroy a queue pair
 * @qp: the queue pair to destroy
 *
 * @return: 0 on success.
 */
int pvrdma_destroy_qp(struct ib_qp *qp)
{
	struct pvrdma_qp *vqp = to_vqp(qp);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
	cmd->qp_handle = vqp->qp_handle;

	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
	if (ret < 0)
		dev_warn(&to_vdev(qp->device)->pdev->dev,
			 "destroy queuepair failed, error: %d\n", ret);

	pvrdma_free_qp(vqp);

	return 0;
}

/**
 * pvrdma_modify_qp - modify queue pair attributes
 * @ibqp: the queue pair
 * @attr: the new queue pair's attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
	int cur_state, next_state;
	int ret;

	/* Sanity checking. Should need lock here */
	mutex_lock(&qp->mutex);
	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
		qp->state;
	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_ETHERNET)) {
		ret = -EINVAL;
		goto out;
	}

	if (attr_mask & IB_QP_PORT) {
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		if (attr->min_rnr_timer > 31) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (cur_state == next_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	qp->state = next_state;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
	cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
	cmd->attrs.cur_qp_state =
		ib_qp_state_to_pvrdma(attr->cur_qp_state);
	cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
	cmd->attrs.path_mig_state =
		ib_mig_state_to_pvrdma(attr->path_mig_state);
	cmd->attrs.qkey = attr->qkey;
	cmd->attrs.rq_psn = attr->rq_psn;
	cmd->attrs.sq_psn = attr->sq_psn;
	cmd->attrs.dest_qp_num = attr->dest_qp_num;
	cmd->attrs.qp_access_flags =
		ib_access_flags_to_pvrdma(attr->qp_access_flags);
	cmd->attrs.pkey_index = attr->pkey_index;
	cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
	cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
	cmd->attrs.sq_draining = attr->sq_draining;
	cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
	cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
	cmd->attrs.port_num = attr->port_num;
	cmd->attrs.timeout = attr->timeout;
	cmd->attrs.retry_cnt = attr->retry_cnt;
	cmd->attrs.rnr_retry = attr->rnr_retry;
	cmd->attrs.alt_port_num = attr->alt_port_num;
	cmd->attrs.alt_timeout = attr->alt_timeout;
	ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
	rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
	rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify queuepair, error: %d\n", ret);
	} else if (rsp.hdr.err > 0) {
		dev_warn(&dev->pdev->dev,
			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
		ret = -EINVAL;
	}

	if (ret == 0 && next_state == IB_QPS_RESET)
		pvrdma_reset_qp(qp);

out:
	mutex_unlock(&qp->mutex);

	return ret;
}

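/*
 * WQEs are stored contiguously in the QP's page directory; slot n of a queue
 * lives at byte offset (queue offset + n * wqe_size), which
 * pvrdma_page_dir_get_ptr() resolves to a kernel virtual address.
 */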
static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->sq.offset + n * qp->sq.wqe_size);
}

static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->rq.offset + n * qp->rq.wqe_size);
}

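/*
 * Translate an IB_WR_REG_MR work request into the device's fast-register
 * segment and publish the MR's page list through its page directory.
 */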
static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
{
	struct pvrdma_user_mr *mr = to_vmr(wr->mr);

	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
	wqe_hdr->wr.fast_reg.access_flags = wr->access;
	wqe_hdr->wr.fast_reg.rkey = wr->key;

	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
						mr->npages);
}

/**
 * pvrdma_post_send - post send work request entries on a QP
 * @ibqp: the QP
 * @wr: work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_sq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int i, ret;

	/*
	 * In states lower than RTS, we can fail immediately. In other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state < IB_QPS_RTS) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send queue is full\n");
			*bad_wr = wr;
			ret = -ENOMEM;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send SGE overflow\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		if (unlikely(wr->opcode < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid send opcode\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Only support UD, RC.
		 * Need to check opcode table for thorough checking.
		 * opcode		_UD	_UC	_RC
		 * _SEND		x	x	x
		 * _SEND_WITH_IMM	x	x	x
		 * _RDMA_WRITE			x	x
		 * _RDMA_WRITE_WITH_IMM		x	x
		 * _LOCAL_INV			x	x
		 * _SEND_WITH_INV		x	x
		 * _RDMA_READ				x
		 * _ATOMIC_CMP_AND_SWP			x
		 * _ATOMIC_FETCH_AND_ADD		x
		 * _MASK_ATOMIC_CMP_AND_SWP		x
		 * _MASK_ATOMIC_FETCH_AND_ADD		x
		 * _REG_MR				x
		 *
		 */
		if (qp->ibqp.qp_type != IB_QPT_UD &&
		    qp->ibqp.qp_type != IB_QPT_RC &&
		    wr->opcode != IB_WR_SEND) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "unsupported queuepair type\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
			   qp->ibqp.qp_type == IB_QPT_GSI) {
			if (wr->opcode != IB_WR_SEND &&
			    wr->opcode != IB_WR_SEND_WITH_IMM) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid send opcode\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}
		}

		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			wqe_hdr->ex.imm_data = wr->ex.imm_data;

		switch (qp->ibqp.qp_type) {
		case IB_QPT_GSI:
		case IB_QPT_UD:
			if (unlikely(!ud_wr(wr)->ah)) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid address handle\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}

			/*
			 * Use qkey from qp context if high order bit set,
			 * otherwise from work request.
			 */
			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
			wqe_hdr->wr.ud.remote_qkey =
				ud_wr(wr)->remote_qkey & 0x80000000 ?
				qp->qkey : ud_wr(wr)->remote_qkey;
			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;

			break;
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				wqe_hdr->wr.rdma.remote_addr =
					rdma_wr(wr)->remote_addr;
				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_SEND_WITH_INV:
				wqe_hdr->ex.invalidate_rkey =
					wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				wqe_hdr->wr.atomic.remote_addr =
					atomic_wr(wr)->remote_addr;
				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
				wqe_hdr->wr.atomic.compare_add =
					atomic_wr(wr)->compare_add;
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
					wqe_hdr->wr.atomic.swap =
						atomic_wr(wr)->swap;
				break;
			case IB_WR_REG_MR:
				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
				if (ret < 0) {
					dev_warn_ratelimited(&dev->pdev->dev,
							     "Failed to set fast register work request\n");
					*bad_wr = wr;
					goto out;
				}
				break;
			default:
				break;
			}

			break;
		default:
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid queuepair type\n");
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			/* Need to check wqe_size 0 or max size */
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared sq ring */
		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
				    qp->sq.wqe_cnt);

		wr = wr->next;
	}

	ret = 0;

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

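	/*
	 * Ring the doorbell once for the whole chain, but only if every WR
	 * was queued successfully.
	 */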
	if (!ret)
		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);

	return ret;
}

/**
 * pvrdma_post_recv - post receive work request entries on a QP
 * @ibqp: the QP
 * @wr: the work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_rq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int ret = 0;
	int i;

	/*
	 * In the RESET state, we can fail immediately. For other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state == IB_QPS_RESET) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->srq) {
		dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->rq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(wr->num_sge > qp->rq.max_sg ||
			     wr->num_sge < 0)) {
			ret = -EINVAL;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv SGE overflow\n");
			goto out;
		}

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv queue full\n");
			goto out;
		}

		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->total_len = 0;

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared rq ring */
		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
				    qp->rq.wqe_cnt);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

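	/* Notify the device that new receive WQEs have been posted. */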
	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);

	return ret;

out:
	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return ret;
}

/**
 * pvrdma_query_qp - query a queue pair's attributes
 * @ibqp: the queue pair to query
 * @attr: the queue pair's attributes
 * @attr_mask: attributes mask
 * @init_attr: initial queue pair attributes
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
	struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
	int ret = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		attr->qp_state = IB_QPS_RESET;
		goto out;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query queuepair, error: %d\n", ret);
		goto out;
	}

	attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
	attr->cur_qp_state =
		pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
	attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
	attr->path_mig_state =
		pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
	attr->qkey = resp->attrs.qkey;
	attr->rq_psn = resp->attrs.rq_psn;
	attr->sq_psn = resp->attrs.sq_psn;
	attr->dest_qp_num = resp->attrs.dest_qp_num;
	attr->qp_access_flags =
		pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
	attr->pkey_index = resp->attrs.pkey_index;
	attr->alt_pkey_index = resp->attrs.alt_pkey_index;
	attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
	attr->sq_draining = resp->attrs.sq_draining;
	attr->max_rd_atomic = resp->attrs.max_rd_atomic;
	attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
	attr->min_rnr_timer = resp->attrs.min_rnr_timer;
	attr->port_num = resp->attrs.port_num;
	attr->timeout = resp->attrs.timeout;
	attr->retry_cnt = resp->attrs.retry_cnt;
	attr->rnr_retry = resp->attrs.rnr_retry;
	attr->alt_port_num = resp->attrs.alt_port_num;
	attr->alt_timeout = resp->attrs.alt_timeout;
	pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
	pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr);
	pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);

	qp->state = attr->qp_state;

	ret = 0;

out:
	attr->cur_qp_state = attr->qp_state;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->xrcd = NULL;
	init_attr->cap = attr->cap;
	init_attr->sq_sig_type = 0;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->create_flags = 0;
	init_attr->port_num = qp->port;

	mutex_unlock(&qp->mutex);
	return ret;
}