/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

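/*
 * This file provides the kernel verbs interface for the soft-RoCE (rxe)
 * driver.  Each entry point converts the core ib_* objects to their rxe
 * counterparts via the to_r*() helpers and delegates the real work to
 * rxe_* helpers elsewhere in the driver.
 */
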
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

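/*
 * The GID table itself lives in the IB core's GID cache (see
 * rxe_query_gid() above), so add/del only need to sanity-check the
 * index before letting the core proceed.
 */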
static int rxe_add_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num,
		       unsigned int index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

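/*
 * Resolve the source GID named by the AH attribute's sgid_index and
 * fill in the rxe address vector: port and GRH fields plus the IP
 * addresses derived from the source and destination GIDs.
 */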
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return 0;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

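/*
 * Build one receive WQE at the producer end of a receive queue (QP RQ
 * or SRQ) from the caller's ib_recv_wr.  Called with the queue's
 * producer lock held.
 */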
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

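/*
 * Translate the opcode-specific fields of an ib_send_wr into the
 * driver's internal rxe_send_wr.  For UD-style QPs the remote QPN and
 * Q_Key come from ud_wr(); for connected QPs the RDMA, atomic and
 * memory-registration unions are filled from the matching wrapper
 * (rdma_wr(), atomic_wr(), reg_wr()).
 */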
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

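/*
 * Validate one ib_send_wr and copy it into the next free send-queue
 * slot under sq_lock.  The smp_wmb() below ensures the WQE is fully
 * written before the producer index advances, so the requester task
 * never observes a partially initialized WQE.
 */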
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the case of a GSI QP because ib_send_mad()
	 * holds an irq lock, and the requester calls ip_local_out_sk(),
	 * which takes spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

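/*
 * Entry point for ib_post_send().  As a rough usage sketch (the names
 * dma_addr, len and mr below are illustrative, not from this file), a
 * kernel ULP reaches this via something like:
 *
 *	struct ib_send_wr wr = {}, *bad_wr;
 *	struct ib_sge sge = { .addr = dma_addr, .length = len,
 *			      .lkey = mr->lkey };
 *	wr.opcode = IB_WR_SEND;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	err = ib_post_send(qp, &wr, &bad_wr);
 *
 * For user QPs the WQEs were already written into the shared queue by
 * userspace, so only the requester task needs to be kicked.
 */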
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

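/*
 * Arm the CQ for the next completion event.  A solicited-only request
 * must not downgrade an existing IB_CQ_NEXT_COMP arming, hence the
 * check before updating cq->notify.  Returning 1 when
 * IB_CQ_REPORT_MISSED_EVENTS is set and the queue is non-empty tells
 * the caller to poll again before sleeping.
 */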
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

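/*
 * Per-page callback for ib_sg_to_pages(): record one page address in
 * the MR's two-level map (RXE_BUF_PER_MAP buffers per map entry).
 */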
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

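/*
 * Populate the ib_device with rxe's verbs, allocate the crc32 shash
 * transform used to compute ICRCs on the wire, and register with the
 * IB core.  Errors unwind in reverse order.
 */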
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}