/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

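/* Device attributes are fixed when the driver loads; return the cached
 * copy and reject any driver-private command or response data, which
 * rxe does not use here.
 */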
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

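/* rxe exposes exactly one port (port_num 1). Most attributes come from
 * the cached port state; active speed and width are read back from the
 * underlying Ethernet device under usdev_lock.
 */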
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

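/* Look up one entry of the per-port pkey table after validating both
 * the port number and the index.
 */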
static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

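/* Resolve the source GID selected by the ah attribute and use it,
 * together with the grh fields, to fill in the rxe address vector.
 * ib_get_cached_gid() takes a reference on the entry's netdev, which
 * is dropped once the av has been filled in.
 */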
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);
	dev_put(sgid_attr.ndev);
	return 0;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

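/* Copy one receive work request into the next free slot of a receive
 * queue. Callers serialize on the queue's producer_lock; the write
 * barrier below keeps the consumer from observing a partially written
 * wqe.
 */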
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

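/* Create a shared receive queue. For a user-space caller the response
 * structure that describes the queue back to the library is taken
 * directly from udata->outbuf after checking its size.
 */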
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

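/* Create a queue pair. A user-space caller must supply room for the
 * create response but no command payload; qp->is_user steers later
 * decisions such as where protocol processing runs on post_send.
 */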
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

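/* Sanity-check a send work request against the send queue limits:
 * SGE count, the 8-byte size and alignment rules for atomics, and the
 * inline data cap.
 */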
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

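/* Translate the opcode-specific fields of an ib_send_wr into the
 * driver's internal rxe_send_wr layout. Datagram QP types carry the
 * remote QPN and qkey; other QP types use the rdma, atomic or MR
 * registration unions selected by the opcode.
 */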
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

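/* Validate one send work request, then build its wqe in place in the
 * send queue's producer slot under sq_lock, publishing it only after
 * the write barrier.
 */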
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the case of a GSI QP because ib_send_mad() holds
	 * an irq lock, and the requester calls ip_local_out_sk(), which
	 * takes spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

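/* Verbs entry point for posting send work requests. For user QPs the
 * wqes have already been written into the shared queue by user space,
 * so the kernel only kicks the requester task; kernel QPs go through
 * the full posting path above.
 */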
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

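/* Create a completion queue. No creation flags are supported, and as
 * with SRQs and QPs the user-space response buffer is validated before
 * the queue is built.
 */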
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

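/* Arm the CQ for the requested notification type. When
 * IB_CQ_REPORT_MISSED_EVENTS is set, return 1 if completions are
 * already pending so the caller knows to poll again.
 */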
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

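/* Per-page callback handed to ib_sg_to_pages() in rxe_map_mr_sg()
 * below: record one buffer address in the MR's two-level map, failing
 * once every slot has been used.
 */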
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

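/* Fill in the ib_device with rxe's identity, the supported uverbs
 * commands and the verbs callbacks, allocate the crc32 transform used
 * for ICRC computation, then register with the RDMA core and create
 * the sysfs attributes.
 */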
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}