/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

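/*
 * Map an Ethernet link speed, in Mb/s as reported by ethtool, onto an
 * approximate IB (active_speed, active_width) pair. The match is keyed
 * on aggregate rate rather than lane count, so e.g. 10 Gb/s Ethernet
 * is reported as 1X FDR10, and anything above 40 Gb/s as 4X EDR.
 */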
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

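/*
 * Resolve the source GID named by attr->grh.sgid_index through the GID
 * cache, then build the rxe address vector: rxe_av_from_attr() copies
 * the generic address fields and rxe_av_fill_ip_info() derives the IP
 * addressing used for the RoCEv2 UDP encapsulation. The cache lookup
 * may take a reference on the GID entry's netdev, dropped on return.
 */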
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

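/*
 * rxe work/completion queues are single-producer rings that may also
 * be mapped into user space. The producer fills the slot returned by
 * producer_addr() and only then publishes it with advance_producer();
 * the smp_wmb() between the two orders the WQE stores before the
 * producer-index store, pairing with the consumer side's read barrier.
 */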
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

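/*
 * Entered via ib_create_qp() with a caller-filled ib_qp_init_attr.
 * A minimal kernel-side sketch of the calling convention follows;
 * it is illustrative only (pd, scq and rcq are assumed to exist and
 * error handling is elided), not part of this driver:
 *
 *	struct ib_qp_init_attr init = {
 *		.qp_type = IB_QPT_RC,
 *		.send_cq = scq,
 *		.recv_cq = rcq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init);
 *
 * udata is non-NULL only for user-space QPs; a non-zero udata->inlen
 * is rejected with -EINVAL.
 */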
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

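/*
 * Sanity-check a send WR against the SQ limits before any queue state
 * is touched: SGE count against max_sge, atomics must cover at least
 * 8 bytes at an 8-byte-aligned remote address, and inline payloads
 * must fit within max_inline.
 */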
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

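/*
 * Copy the opcode-dependent fields of an ib_send_wr into rxe's
 * internal rxe_send_wr. UD-class QPs (UD/SMI/GSI) carry the remote
 * QPN/Q_Key; connected QPs demux on the opcode to pick up RDMA,
 * atomic, local-invalidate or memory-registration parameters.
 */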
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through: write-with-imm also carries the
			 * rdma remote_addr/rkey fields below
			 */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

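/*
 * Build the WQE in place in the send queue slot. For IB_SEND_INLINE
 * the payload is copied into the WQE now (from user memory when the
 * QP belongs to a user context, otherwise from kernel memory), so the
 * caller's buffers may be reused as soon as this returns. Registration
 * WRs carry no DMA state and are marked posted immediately.
 */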
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					   (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		    atomic_wr(ibwr)->remote_addr :
		    rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

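/*
 * Reserve and fill one SQ slot under sq_lock. The lock serializes
 * producers; the smp_wmb() before advance_producer() publishes the
 * completed WQE to the requester task, which consumes the queue from
 * another context.
 */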
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the case of a GSI QP because ib_send_mad()
	 * holds an irq lock and the requester calls ip_local_out_sk(),
	 * which takes spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

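/*
 * Entered via ib_post_send(). For kernel QPs the WR chain is posted
 * here; for user QPs the WQEs were already written into the shared
 * queue by the user library, so this call only kicks the requester
 * task. An illustrative caller-side sketch (assumed names, not part
 * of this driver):
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND, .sg_list = &sge, .num_sge = 1,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */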
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	}

	return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

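/*
 * Drain up to num_entries CQEs into the caller's wc array under
 * cq_lock and return how many were copied. A typical consumer-side
 * polling loop looks roughly like the sketch below (illustrative
 * only; handle_completion() is a hypothetical callback):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 */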
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

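/*
 * Arm the CQ for the next completion event. A pending IB_CQ_NEXT_COMP
 * request is never downgraded to IB_CQ_SOLICITED. When the caller sets
 * IB_CQ_REPORT_MISSED_EVENTS, a return value of 1 means completions
 * are already queued and should be polled before sleeping.
 */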
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

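/*
 * rxe_set_page() is the per-page callback handed to ib_sg_to_pages()
 * by rxe_map_mr_sg() below: each call appends one page-sized buffer
 * to the MR's two-level map (RXE_BUF_PER_MAP buffers per rxe_map),
 * failing with -ENOMEM once num_buf pages have been stored.
 */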
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

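/*
 * Wire up the ib_device: fill in the device attributes and verb
 * callbacks, allocate the crc32 shash used elsewhere in the driver
 * for packet CRC computation, register with the IB core, and finally
 * create the sysfs attribute files. The error path unwinds in the
 * reverse order.
 */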
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe_node_guid(rxe);
	dev->dev.dma_ops = &dma_virt_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(rxe->tfm)) {
		pr_err("failed to allocate crc algorithm, err: %ld\n",
		       PTR_ERR(rxe->tfm));
		return PTR_ERR(rxe->tfm);
	}

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}