/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

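/* Check the requested capabilities (work request, SGE and inline data
 * limits) against the device attributes; the receive limits are only
 * checked when the QP does not use an SRQ.
 */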
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

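/* Validate the init attributes passed to the create qp verb: both CQs
 * must be present, the capabilities must fit the device and, for SMI/GSI
 * QPs, the port must be 1 and must not already own a QP of that type.
 */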
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

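/* Allocate the responder-side resources used to service inbound RDMA
 * read and atomic operations, one slot per outstanding request.
 */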
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

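/* Release whatever a single responder resource holds: the qp reference
 * and skb for an atomic response, or the MR reference for a read.
 */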
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

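/* Initialize the fields common to all QP types. SMI and GSI QPs get the
 * well-known QP numbers 0 and 1 and are recorded in the port; all other
 * QPs use their pool index as the QP number.
 */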
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

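/* Set up the requester side of the QP: a kernel datagram socket (with
 * sk_user_data pointing back to the qp), the send queue and its mmap
 * info for user space, the requester and completer tasks, and the
 * retransmit and RNR NAK timers.
 */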
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr = init->cap.max_send_wr;
	qp->sq.max_sge = init->cap.max_send_sge;
	qp->sq.max_inline = init->cap.max_inline_data;

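	/* A send WQE must be able to hold either the largest allowed
	 * scatter/gather list or the largest allowed inline payload,
	 * whichever is bigger.
	 */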
	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	init_timer(&qp->rnr_nak_timer);
	qp->rnr_nak_timer.function = rnr_nak_timer;
	qp->rnr_nak_timer.data = (unsigned long)qp;

	init_timer(&qp->retrans_timer);
	qp->retrans_timer.function = retransmit_timer;
	qp->retrans_timer.data = (unsigned long)qp;
	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

	return 0;
}

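/* Set up the responder side of the QP: the receive queue and its mmap
 * info (only when the QP does not use an SRQ) and the responder task.
 */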
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

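	/* max_rd_atomic is rounded up to a power of two; the read/atomic
	 * resources are reallocated at the new size and the requester's
	 * rd_atomic counter is reset to the new limit.
	 */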
	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_rd_atomic);
		if (err)
			return err;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

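	/* For the primary and alternate paths the source GID is looked up
	 * in the GID cache and used to fill in the IP addressing of the
	 * cached address vector.
	 */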
	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("set min rnr timer = 0x%x\n",
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("set req psn = 0x%x\n", qp->req.psn);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		qp->attr.max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp state -> RESET\n");
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp state -> INIT\n");
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp state -> RTR\n");
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp state -> RTS\n");
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp state -> SQD\n");
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp state -> SQE !!?\n");
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp state -> ERR\n");
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	del_timer_sync(&qp->retrans_timer);
	del_timer_sync(&qp->rnr_nak_timer);

	rxe_cleanup_task(&qp->req.task);
	if (qp_type(qp) == IB_QPT_RC)
		rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(void *arg)
{
	struct rxe_qp *qp = arg;

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
}