/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

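/* Resolve the source GID referenced by the AH attribute and build the rxe
 * address vector (the IP addressing information carried on the wire).
 */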
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

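/* Copy one receive work request into the next free slot of a receive (or
 * shared receive) queue ring.  Callers hold the queue's producer lock.
 */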
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

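/* Sanity-check a send work request against the send queue limits: SGE
 * count, minimum length and alignment rules for atomics, and the inline
 * data limit.
 */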
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

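/* Translate the opcode-specific fields of an ib_send_wr into the private
 * rxe_send_wr layout stored in the work queue element.
 */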
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

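/* Build a complete send WQE from a work request: copy the address vector
 * for datagram QPs, either inline the payload or copy the SGE list, and
 * initialize the DMA state and send sequence number.
 */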
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
		    atomic_wr(ibwr)->remote_addr :
		    rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

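/* Validate and post a single send work request to the send queue under
 * the send queue lock.
 */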
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

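/* Post a chain of send work requests from a kernel ULP and then kick the
 * requester task to start processing them.
 */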
static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the GSI QP case because ib_send_mad() holds an
	 * irq lock, and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
		     (queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

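/* Post a chain of receive work requests directly to a QP's receive queue.
 * Not valid for QPs attached to an SRQ.
 */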
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

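/* Callback used by ib_sg_to_pages(); records one page address in the
 * memory region's map table.
 */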
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

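/* Load a scatterlist into a fast-registration MR by walking it page by
 * page through rxe_set_page() and caching the iova, length and page
 * geometry.
 */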
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

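/* Fill in the ib_device structure (verbs callbacks, capabilities, DMA
 * setup), allocate the crc32 shash transform used to compute packet CRCs,
 * and register the device with the RDMA core along with its sysfs
 * attributes.
 */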
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}