/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

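/* Map an ethtool link speed in Mb/s to the nearest IB (width, speed)
 * pair, e.g. 10000 -> 1X FDR10 and 40000 -> 4X FDR10.
 */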
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

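/* For RoCE the GID table is maintained by the IB core GID cache, so
 * add_gid/del_gid only need to validate the table index.
 */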
static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe->ifc_ops->link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	err = rxe_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

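/* Resolve the source GID for an address handle and fill in the rxe_av
 * with the IP addressing info used later to build packet headers.
 */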
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

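/* Copy one receive work request into the next free slot of the receive
 * queue ring; the caller holds the queue's producer lock.
 */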
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

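/* Check one send work request against the send queue limits: SGE count,
 * minimum size and 8-byte alignment for atomics, and the inline data cap.
 */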
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

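/* Translate an ib_send_wr into the internal rxe_send_wr layout, copying
 * the opcode-specific fields (UD, RDMA, atomic, invalidate, reg MR).
 */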
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

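/* Build a complete send WQE in place: fill in the WR, copy inline data
 * or the SGE list, and initialize the DMA state for the requester.
 */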
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (__user void *)
					(uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
			atomic_wr(ibwr)->remote_addr :
			rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

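/* Post a single validated work request: reserve a slot in the send
 * queue, build the WQE there, then publish it with a write barrier
 * before advancing the producer index.
 */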
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must sched in case of GSI QP because ib_send_mad() holds the irq
	 * lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

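/* For a user QP the WQEs are already written into the queue buffer
 * shared with userspace, so posting only kicks the requester task;
 * kernel QPs go through rxe_post_send_kernel().
 */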
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

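/* Arm the CQ for completion events. An existing IB_CQ_NEXT_COMP request
 * is never downgraded to IB_CQ_SOLICITED; with IB_CQ_REPORT_MISSED_EVENTS,
 * return 1 if completions are already queued.
 */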
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

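/* Callback for ib_sg_to_pages(); records one page address per call in
 * the memory region's buffer map.
 */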
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);
	char *name;

	name = rxe->ifc_ops->parent_name(rxe, 1);
	return snprintf(buf, 16, "%s\n", name);
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

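/* Fill in the ib_device fields and verbs callbacks, register the device
 * with the IB core, and create its sysfs attribute files.
 */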
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe->ifc_ops->dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe->ifc_ops->node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}